Dataset columns (min / max string length):

hip_filename    string   5 / 84
hip_content     string   79 / 9.69M
cuda_filename   string   4 / 83
cuda_content    string   19 / 9.69M
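Each row pairs a hipify-generated HIP source file with its original CUDA counterpart, as shown in the examples below. A minimal Python sketch for loading the dataset and diffing one pair, assuming it is hosted on the Hugging Face Hub; the repository id "org/hip-cuda-pairs" and the split name "train" are placeholders, and only the column names above are taken from the schema:

```python
# Minimal sketch (assumptions: the dataset is on the Hugging Face Hub under the
# hypothetical id "org/hip-cuda-pairs"; the split name "train" is assumed).
from datasets import load_dataset
import difflib

ds = load_dataset("org/hip-cuda-pairs", split="train")  # hypothetical repo id

row = ds[0]
print(row["hip_filename"], "<-", row["cuda_filename"])

# Diff the CUDA original against the hipified version to surface the API
# substitutions (e.g. cudaStreamCreate -> hipStreamCreate).
diff = difflib.unified_diff(
    row["cuda_content"].splitlines(),
    row["hip_content"].splitlines(),
    fromfile=row["cuda_filename"],
    tofile=row["hip_filename"],
    lineterm="",
)
for line in list(diff)[:40]:  # print only the first few diff lines
    print(line)
```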
2d4f484e36d48876b3190174f568da627749d001.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/mr/device/allocator.hpp> #include <random> #include <vector> #include <linalg_naive.h> #include <raft/cudart_utils.h> #include <test_utils.h> #include <linalg/batched/matrix.cuh> #include <sparse/batched/csr.cuh> #include "../test_utils.h" namespace MLCommon { namespace Sparse { namespace Batched { enum CSROperation { SpMV_op, SpMM_op }; template <typename T> struct CSRInputs { CSROperation operation; int batch_size; int m; // Dimensions of A int n; int nnz; // Number of non-zero elements in A int p; // Dimensions of B or x int q; T alpha; // Scalars T beta; T tolerance; }; template <typename T> class CSRTest : public ::testing::TestWithParam<CSRInputs<T>> { protected: void SetUp() override { using std::vector; params = ::testing::TestWithParam<CSRInputs<T>>::GetParam(); // Check if the dimensions are valid and compute the output dimensions int m_r, n_r; switch (params.operation) { case SpMV_op: ASSERT_TRUE(params.n == params.p); ASSERT_TRUE(params.q == 1); m_r = params.m; n_r = 1; break; case SpMM_op: ASSERT_TRUE(params.n == params.p); m_r = params.m; n_r = params.q; break; } // Create test matrices/vectors std::vector<T> A; std::vector<T> Bx; A.resize(params.batch_size * params.m * params.n, (T)0.0); Bx.resize(params.batch_size * params.p * params.q); std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<T> idis(0, params.m * params.n - 1); std::uniform_real_distribution<T> udis(-1.0, 3.0); // Generate a random sparse matrix (with dense representation) std::vector<bool> mask = std::vector<bool>(params.m * params.n, false); for (int idx = 0; idx < params.nnz; idx++) { int k; do { k = idis(gen); } while (mask[k]); mask[k] = true; int i = k % params.m; int j = k / params.m; for (int bid = 0; bid < params.batch_size; bid++) { A[bid * params.m * params.n + j * params.m + i] = udis(gen); } } // Generate random dense matrices/vectors for (int i = 0; i < Bx.size(); i++) Bx[i] = udis(gen); res_h.resize(params.batch_size * m_r * n_r); for (int i = 0; i < res_h.size(); i++) res_h[i] = udis(gen); // Create handles, stream, allocator CUBLAS_CHECK(hipblasCreate(&handle)); CUDA_CHECK(hipStreamCreate(&stream)); CUSOLVER_CHECK(cusolverSpCreate(&cusolverSpHandle)); auto allocator = std::make_shared<raft::mr::device::default_allocator>(); // Created batched dense matrices LinAlg::Batched::Matrix<T> AbM(params.m, params.n, params.batch_size, handle, allocator, stream); LinAlg::Batched::Matrix<T> BxbM(params.p, params.q, params.batch_size, handle, allocator, stream); // Create matrix that will hold the results res_bM = new LinAlg::Batched::Matrix<T>(m_r, n_r, params.batch_size, handle, allocator, stream); // Copy the data to the device raft::update_device(AbM.raw_data(), A.data(), A.size(), stream); raft::update_device(BxbM.raw_data(), Bx.data(), Bx.size(), stream); 
raft::update_device(res_bM->raw_data(), res_h.data(), res_h.size(), stream); // Create sparse matrix A from the dense A and the mask CSR<T> AbS = CSR<T>::from_dense(AbM, mask, cusolverSpHandle); // Compute the tested results switch (params.operation) { case SpMV_op: b_spmv(params.alpha, AbS, BxbM, params.beta, *res_bM); break; case SpMM_op: b_spmm(params.alpha, AbS, BxbM, params.beta, *res_bM); break; } // Compute the expected results switch (params.operation) { case SpMV_op: for (int bid = 0; bid < params.batch_size; bid++) { LinAlg::Naive::matMul(res_h.data() + bid * m_r, A.data() + bid * params.m * params.n, Bx.data() + bid * params.p, params.m, params.n, 1, params.alpha, params.beta); } break; case SpMM_op: for (int bid = 0; bid < params.batch_size; bid++) { LinAlg::Naive::matMul(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, Bx.data() + bid * params.p * params.q, params.m, params.n, params.q, params.alpha, params.beta); } break; } CUDA_CHECK(hipStreamSynchronize(stream)); } void TearDown() override { delete res_bM; CUBLAS_CHECK(hipblasDestroy(handle)); CUDA_CHECK(hipStreamDestroy(stream)); CUSOLVER_CHECK(cusolverSpDestroy(cusolverSpHandle)); } protected: CSRInputs<T> params; LinAlg::Batched::Matrix<T> *res_bM; std::vector<T> res_h; hipblasHandle_t handle; cusolverSpHandle_t cusolverSpHandle; hipStream_t stream; }; // Test parameters (op, batch_size, m, n, nnz, p, q, tolerance) const std::vector<CSRInputs<double>> inputsd = { {SpMV_op, 1, 90, 150, 440, 150, 1, 1.0, 0.0, 1e-6}, {SpMV_op, 5, 13, 12, 75, 12, 1, -1.0, 1.0, 1e-6}, {SpMV_op, 15, 8, 4, 6, 4, 1, 0.5, 0.5, 1e-6}, {SpMV_op, 33, 7, 7, 23, 7, 1, -0.5, -0.5, 1e-6}, {SpMM_op, 1, 20, 15, 55, 15, 30, 1.0, 0.0, 1e-6}, {SpMM_op, 9, 10, 9, 31, 9, 11, -1.0, 0.5, 1e-6}, {SpMM_op, 20, 7, 12, 11, 12, 13, 0.5, 0.5, 1e-6}}; // Test parameters (op, batch_size, m, n, nnz, p, q, tolerance) const std::vector<CSRInputs<float>> inputsf = { {SpMV_op, 1, 90, 150, 440, 150, 1, 1.0f, 0.0f, 1e-2}, {SpMV_op, 5, 13, 12, 75, 12, 1, -1.0f, 1.0f, 1e-2}, {SpMV_op, 15, 8, 4, 6, 4, 1, 0.5f, 0.5f, 1e-2}, {SpMV_op, 33, 7, 7, 23, 7, 1, -0.5f, -0.5f, 1e-2}, {SpMM_op, 1, 20, 15, 55, 15, 30, 1.0f, 0.0f, 1e-2}, {SpMM_op, 9, 10, 9, 31, 9, 11, -1.0f, 0.5f, 1e-2}, {SpMM_op, 20, 7, 12, 11, 12, 13, 0.5f, 0.5f, 1e-2}}; using BatchedCSRTestD = CSRTest<double>; using BatchedCSRTestF = CSRTest<float>; TEST_P(BatchedCSRTestD, Result) { ASSERT_TRUE(devArrMatchHost(res_h.data(), res_bM->raw_data(), res_h.size(), raft::CompareApprox<double>(params.tolerance), stream)); } TEST_P(BatchedCSRTestF, Result) { ASSERT_TRUE(devArrMatchHost(res_h.data(), res_bM->raw_data(), res_h.size(), raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(BatchedCSRTests, BatchedCSRTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(BatchedCSRTests, BatchedCSRTestF, ::testing::ValuesIn(inputsf)); } // namespace Batched } // namespace Sparse } // namespace MLCommon
2d4f484e36d48876b3190174f568da627749d001.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/mr/device/allocator.hpp> #include <random> #include <vector> #include <linalg_naive.h> #include <raft/cudart_utils.h> #include <test_utils.h> #include <linalg/batched/matrix.cuh> #include <sparse/batched/csr.cuh> #include "../test_utils.h" namespace MLCommon { namespace Sparse { namespace Batched { enum CSROperation { SpMV_op, SpMM_op }; template <typename T> struct CSRInputs { CSROperation operation; int batch_size; int m; // Dimensions of A int n; int nnz; // Number of non-zero elements in A int p; // Dimensions of B or x int q; T alpha; // Scalars T beta; T tolerance; }; template <typename T> class CSRTest : public ::testing::TestWithParam<CSRInputs<T>> { protected: void SetUp() override { using std::vector; params = ::testing::TestWithParam<CSRInputs<T>>::GetParam(); // Check if the dimensions are valid and compute the output dimensions int m_r, n_r; switch (params.operation) { case SpMV_op: ASSERT_TRUE(params.n == params.p); ASSERT_TRUE(params.q == 1); m_r = params.m; n_r = 1; break; case SpMM_op: ASSERT_TRUE(params.n == params.p); m_r = params.m; n_r = params.q; break; } // Create test matrices/vectors std::vector<T> A; std::vector<T> Bx; A.resize(params.batch_size * params.m * params.n, (T)0.0); Bx.resize(params.batch_size * params.p * params.q); std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<T> idis(0, params.m * params.n - 1); std::uniform_real_distribution<T> udis(-1.0, 3.0); // Generate a random sparse matrix (with dense representation) std::vector<bool> mask = std::vector<bool>(params.m * params.n, false); for (int idx = 0; idx < params.nnz; idx++) { int k; do { k = idis(gen); } while (mask[k]); mask[k] = true; int i = k % params.m; int j = k / params.m; for (int bid = 0; bid < params.batch_size; bid++) { A[bid * params.m * params.n + j * params.m + i] = udis(gen); } } // Generate random dense matrices/vectors for (int i = 0; i < Bx.size(); i++) Bx[i] = udis(gen); res_h.resize(params.batch_size * m_r * n_r); for (int i = 0; i < res_h.size(); i++) res_h[i] = udis(gen); // Create handles, stream, allocator CUBLAS_CHECK(cublasCreate(&handle)); CUDA_CHECK(cudaStreamCreate(&stream)); CUSOLVER_CHECK(cusolverSpCreate(&cusolverSpHandle)); auto allocator = std::make_shared<raft::mr::device::default_allocator>(); // Created batched dense matrices LinAlg::Batched::Matrix<T> AbM(params.m, params.n, params.batch_size, handle, allocator, stream); LinAlg::Batched::Matrix<T> BxbM(params.p, params.q, params.batch_size, handle, allocator, stream); // Create matrix that will hold the results res_bM = new LinAlg::Batched::Matrix<T>(m_r, n_r, params.batch_size, handle, allocator, stream); // Copy the data to the device raft::update_device(AbM.raw_data(), A.data(), A.size(), stream); raft::update_device(BxbM.raw_data(), Bx.data(), Bx.size(), stream); raft::update_device(res_bM->raw_data(), res_h.data(), res_h.size(), stream); // Create sparse 
matrix A from the dense A and the mask CSR<T> AbS = CSR<T>::from_dense(AbM, mask, cusolverSpHandle); // Compute the tested results switch (params.operation) { case SpMV_op: b_spmv(params.alpha, AbS, BxbM, params.beta, *res_bM); break; case SpMM_op: b_spmm(params.alpha, AbS, BxbM, params.beta, *res_bM); break; } // Compute the expected results switch (params.operation) { case SpMV_op: for (int bid = 0; bid < params.batch_size; bid++) { LinAlg::Naive::matMul(res_h.data() + bid * m_r, A.data() + bid * params.m * params.n, Bx.data() + bid * params.p, params.m, params.n, 1, params.alpha, params.beta); } break; case SpMM_op: for (int bid = 0; bid < params.batch_size; bid++) { LinAlg::Naive::matMul(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, Bx.data() + bid * params.p * params.q, params.m, params.n, params.q, params.alpha, params.beta); } break; } CUDA_CHECK(cudaStreamSynchronize(stream)); } void TearDown() override { delete res_bM; CUBLAS_CHECK(cublasDestroy(handle)); CUDA_CHECK(cudaStreamDestroy(stream)); CUSOLVER_CHECK(cusolverSpDestroy(cusolverSpHandle)); } protected: CSRInputs<T> params; LinAlg::Batched::Matrix<T> *res_bM; std::vector<T> res_h; cublasHandle_t handle; cusolverSpHandle_t cusolverSpHandle; cudaStream_t stream; }; // Test parameters (op, batch_size, m, n, nnz, p, q, tolerance) const std::vector<CSRInputs<double>> inputsd = { {SpMV_op, 1, 90, 150, 440, 150, 1, 1.0, 0.0, 1e-6}, {SpMV_op, 5, 13, 12, 75, 12, 1, -1.0, 1.0, 1e-6}, {SpMV_op, 15, 8, 4, 6, 4, 1, 0.5, 0.5, 1e-6}, {SpMV_op, 33, 7, 7, 23, 7, 1, -0.5, -0.5, 1e-6}, {SpMM_op, 1, 20, 15, 55, 15, 30, 1.0, 0.0, 1e-6}, {SpMM_op, 9, 10, 9, 31, 9, 11, -1.0, 0.5, 1e-6}, {SpMM_op, 20, 7, 12, 11, 12, 13, 0.5, 0.5, 1e-6}}; // Test parameters (op, batch_size, m, n, nnz, p, q, tolerance) const std::vector<CSRInputs<float>> inputsf = { {SpMV_op, 1, 90, 150, 440, 150, 1, 1.0f, 0.0f, 1e-2}, {SpMV_op, 5, 13, 12, 75, 12, 1, -1.0f, 1.0f, 1e-2}, {SpMV_op, 15, 8, 4, 6, 4, 1, 0.5f, 0.5f, 1e-2}, {SpMV_op, 33, 7, 7, 23, 7, 1, -0.5f, -0.5f, 1e-2}, {SpMM_op, 1, 20, 15, 55, 15, 30, 1.0f, 0.0f, 1e-2}, {SpMM_op, 9, 10, 9, 31, 9, 11, -1.0f, 0.5f, 1e-2}, {SpMM_op, 20, 7, 12, 11, 12, 13, 0.5f, 0.5f, 1e-2}}; using BatchedCSRTestD = CSRTest<double>; using BatchedCSRTestF = CSRTest<float>; TEST_P(BatchedCSRTestD, Result) { ASSERT_TRUE(devArrMatchHost(res_h.data(), res_bM->raw_data(), res_h.size(), raft::CompareApprox<double>(params.tolerance), stream)); } TEST_P(BatchedCSRTestF, Result) { ASSERT_TRUE(devArrMatchHost(res_h.data(), res_bM->raw_data(), res_h.size(), raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(BatchedCSRTests, BatchedCSRTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(BatchedCSRTests, BatchedCSRTestF, ::testing::ValuesIn(inputsf)); } // namespace Batched } // namespace Sparse } // namespace MLCommon
0f487a6f37ecebbea3321226dfdc17fcedb9d383.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (C) 2020 THL A29 Limited, a Tencent company. // All rights reserved. // Licensed under the BSD 3-Clause License (the "License"); you may // not use this file except in compliance with the License. You may // obtain a copy of the License at // https://opensource.org/licenses/BSD-3-Clause // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" basis, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. // See the AUTHORS file for names of contributors. #include <hip/hip_runtime.h> #include <numeric> #include "turbo_transformers/layers/kernels/gpu_embedding_kernel.h" namespace turbo_transformers { namespace layers { namespace kernels { template <bool IsAdd> static __global__ void lookup(float* dst, const float* embedding_table, const int64_t* ids, int64_t vocab_size) { int64_t id = ids[blockIdx.x]; int hidden_idx = threadIdx.x; int hidden_size = blockDim.x; // TODO(jiaruifang): There should have a checker to check the range of id. if (id >= vocab_size) { asm("trap;"); } #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ > 300 float val = __ldg(&embedding_table[id * hidden_size + hidden_idx]); #else float val = embedding_table[id * hidden_size + hidden_idx]; #endif if (IsAdd) { dst[blockIdx.x * hidden_size + hidden_idx] += val; } else { dst[blockIdx.x * hidden_size + hidden_idx] = val; } } template <bool Add> void GPULookupKernel(float* dst, const float* embedding_table, const int64_t* ids, int64_t vocab_size, int64_t hidden_size, int64_t num_ids, hipStream_t stream) { dim3 grid(num_ids); dim3 block(hidden_size); if (block.x > 1024) { throw std::runtime_error( "GPULookupKernel currently does not support a hidden_size larger than " "1024"); } hipLaunchKernelGGL(( lookup<Add>) , dim3(grid), dim3(block), 0, stream, dst, embedding_table, ids, vocab_size); } template void GPULookupKernel<true>(float* dst, const float* embedding_table, const int64_t* ids, int64_t vocab_size, int64_t hidden_size, int64_t num_ids, hipStream_t stream); template void GPULookupKernel<false>(float* dst, const float* embedding_table, const int64_t* ids, int64_t vocab_size, int64_t hidden_size, int64_t num_ids, hipStream_t stream); } // namespace kernels } // namespace layers } // namespace turbo_transformers
0f487a6f37ecebbea3321226dfdc17fcedb9d383.cu
// Copyright (C) 2020 THL A29 Limited, a Tencent company. // All rights reserved. // Licensed under the BSD 3-Clause License (the "License"); you may // not use this file except in compliance with the License. You may // obtain a copy of the License at // https://opensource.org/licenses/BSD-3-Clause // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" basis, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. // See the AUTHORS file for names of contributors. #include <cuda_runtime.h> #include <numeric> #include "turbo_transformers/layers/kernels/gpu_embedding_kernel.h" namespace turbo_transformers { namespace layers { namespace kernels { template <bool IsAdd> static __global__ void lookup(float* dst, const float* embedding_table, const int64_t* ids, int64_t vocab_size) { int64_t id = ids[blockIdx.x]; int hidden_idx = threadIdx.x; int hidden_size = blockDim.x; // TODO(jiaruifang): There should have a checker to check the range of id. if (id >= vocab_size) { asm("trap;"); } #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ > 300 float val = __ldg(&embedding_table[id * hidden_size + hidden_idx]); #else float val = embedding_table[id * hidden_size + hidden_idx]; #endif if (IsAdd) { dst[blockIdx.x * hidden_size + hidden_idx] += val; } else { dst[blockIdx.x * hidden_size + hidden_idx] = val; } } template <bool Add> void GPULookupKernel(float* dst, const float* embedding_table, const int64_t* ids, int64_t vocab_size, int64_t hidden_size, int64_t num_ids, cudaStream_t stream) { dim3 grid(num_ids); dim3 block(hidden_size); if (block.x > 1024) { throw std::runtime_error( "GPULookupKernel currently does not support a hidden_size larger than " "1024"); } lookup<Add> <<<grid, block, 0, stream>>>(dst, embedding_table, ids, vocab_size); } template void GPULookupKernel<true>(float* dst, const float* embedding_table, const int64_t* ids, int64_t vocab_size, int64_t hidden_size, int64_t num_ids, cudaStream_t stream); template void GPULookupKernel<false>(float* dst, const float* embedding_table, const int64_t* ids, int64_t vocab_size, int64_t hidden_size, int64_t num_ids, cudaStream_t stream); } // namespace kernels } // namespace layers } // namespace turbo_transformers
cf0488b857ecf8a35e3fefff7ff5c6b98086aab8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #include <stdlib.h> using namespace std; __global__ void matrixMultiplicationKernel(double* A, double* B, double* C, int N) { int ROW = blockIdx.y*blockDim.y+threadIdx.y; int COL = blockIdx.x*blockDim.x+threadIdx.x; double tmpSum = 0; if (ROW < N && COL < N) { // each thread computes one element of the block sub-matrix for (int i = 0; i < N; i++) { tmpSum += A[ROW * N + i] * B[i * N + COL]; } } C[ROW * N + COL] = tmpSum; } void matrixMultiplication(double *A, double *B, double *C, int N){ // declare the number of blocks per grid and the number of threads per block // use 1 to 512 threads per block dim3 threadsPerBlock(N, N); dim3 blocksPerGrid(1, 1); if (N*N > 512){ threadsPerBlock.x = 512; threadsPerBlock.y = 512; blocksPerGrid.x = ceil(double(N)/double(threadsPerBlock.x)); blocksPerGrid.y = ceil(double(N)/double(threadsPerBlock.y)); } hipLaunchKernelGGL(( matrixMultiplicationKernel), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, A, B, C, N); }
cf0488b857ecf8a35e3fefff7ff5c6b98086aab8.cu
#include "kernel.h" #include <stdlib.h> using namespace std; __global__ void matrixMultiplicationKernel(double* A, double* B, double* C, int N) { int ROW = blockIdx.y*blockDim.y+threadIdx.y; int COL = blockIdx.x*blockDim.x+threadIdx.x; double tmpSum = 0; if (ROW < N && COL < N) { // each thread computes one element of the block sub-matrix for (int i = 0; i < N; i++) { tmpSum += A[ROW * N + i] * B[i * N + COL]; } } C[ROW * N + COL] = tmpSum; } void matrixMultiplication(double *A, double *B, double *C, int N){ // declare the number of blocks per grid and the number of threads per block // use 1 to 512 threads per block dim3 threadsPerBlock(N, N); dim3 blocksPerGrid(1, 1); if (N*N > 512){ threadsPerBlock.x = 512; threadsPerBlock.y = 512; blocksPerGrid.x = ceil(double(N)/double(threadsPerBlock.x)); blocksPerGrid.y = ceil(double(N)/double(threadsPerBlock.y)); } matrixMultiplicationKernel<<<blocksPerGrid,threadsPerBlock>>>(A, B, C, N); }
80085c6e18bbb45659cd9b9a03c105417222d52a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - Neither the name(s) of the copyright holder(s) nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdlib.h> #include <stdio.h> #include <unordered_map> #include <vector> #include <hip/hip_runtime.h> #include <cutensor.h> #define HANDLE_ERROR(x) \ { const auto err = x; \ if( err != CUTENSOR_STATUS_SUCCESS ) \ { printf("Error: %s\n", cutensorGetErrorString(err)); return err; } \ }; #define HANDLE_CUDA_ERROR(x) \ { const auto err = x; \ if( err != hipSuccess ) \ { printf("Error: %s\n", hipGetErrorString(err)); return err; } \ }; /* This routine computes the tensor contraction \f[ D = alpha * A * B + beta * C \f] using the staged-API */ cutensorStatus_t cutensorContractionSimple(const cutensorHandle_t* handle, const void* alpha, const void *A, const cutensorTensorDescriptor_t* descA, const int32_t modeA[], const void *B, const cutensorTensorDescriptor_t* descB, const int32_t modeB[], const void* beta, const void *C, const cutensorTensorDescriptor_t* descC, const int32_t modeC[], void *D, const cutensorTensorDescriptor_t* descD, const int32_t modeD[], cutensorComputeType_t typeCompute, cutensorAlgo_t algo, cutensorWorksizePreference_t workPref, hipStream_t stream) { /********************************************** * Retrieve the memory alignment for each tensor **********************************************/ uint32_t alignmentRequirementA; HANDLE_ERROR(cutensorGetAlignmentRequirement(handle, A, descA, &alignmentRequirementA)); uint32_t alignmentRequirementB; HANDLE_ERROR(cutensorGetAlignmentRequirement(handle, B, descB, &alignmentRequirementB)); uint32_t alignmentRequirementC; HANDLE_ERROR(cutensorGetAlignmentRequirement(handle, C, descC, &alignmentRequirementC)); uint32_t alignmentRequirementD; HANDLE_ERROR(cutensorGetAlignmentRequirement(handle, D, descD, &alignmentRequirementD)); /******************************* * Create Contraction Descriptor *******************************/ cutensorContractionDescriptor_t desc; HANDLE_ERROR(cutensorInitContractionDescriptor(handle, &desc, descA, modeA, 
alignmentRequirementA, descB, modeB, alignmentRequirementB, descC, modeC, alignmentRequirementC, descD, modeD, alignmentRequirementD, typeCompute)); /************************** * Set the algorithm to use ***************************/ cutensorContractionFind_t find; HANDLE_ERROR(cutensorInitContractionFind( handle, &find, algo)); /********************** * Query workspace **********************/ size_t worksize = 0; HANDLE_ERROR(cutensorContractionGetWorkspaceSize(handle, &desc, &find, workPref, &worksize)); void *work = nullptr; if (worksize > 0) { if(hipSuccess != hipMalloc(&work, worksize)) { work = nullptr; worksize = 0; } } /************************** * Create Contraction Plan **************************/ cutensorContractionPlan_t plan; HANDLE_ERROR(cutensorInitContractionPlan(handle, &plan, &desc, &find, worksize)); /********************** * Run **********************/ HANDLE_ERROR(cutensorContraction(handle, &plan, alpha, A, B, beta, C, D, work, worksize, stream)); return CUTENSOR_STATUS_SUCCESS; } int main() { typedef float floatTypeA; typedef float floatTypeB; typedef float floatTypeC; typedef float floatTypeCompute; hipDataType typeA = HIP_R_32F; hipDataType typeB = HIP_R_32F; hipDataType typeC = HIP_R_32F; cutensorComputeType_t typeCompute = CUTENSOR_COMPUTE_32F; floatTypeCompute alpha = (floatTypeCompute) 1.1f; floatTypeCompute beta = (floatTypeCompute) 0.f; /********************** * Computing: C_{m,u,n,v} = alpha * A_{m,h,k,n} B_{u,k,v,h} + beta * C_{m,u,n,v} **********************/ std::vector<int> modeC{'m','u','n','v'}; std::vector<int> modeA{'m','h','k','n'}; std::vector<int> modeB{'u','k','v','h'}; int nmodeA = modeA.size(); int nmodeB = modeB.size(); int nmodeC = modeC.size(); std::unordered_map<int, int64_t> extent; extent['m'] = 96; extent['n'] = 96; extent['u'] = 96; extent['v'] = 64; extent['h'] = 64; extent['k'] = 64; double gflops = (2.0 * extent['m'] * extent['n'] * extent['u'] * extent['v'] * extent['k'] * extent['h']) /1e9; std::vector<int64_t> extentC; for (auto mode : modeC) extentC.push_back(extent[mode]); std::vector<int64_t> extentA; for (auto mode : modeA) extentA.push_back(extent[mode]); std::vector<int64_t> extentB; for (auto mode : modeB) extentB.push_back(extent[mode]); /********************** * Allocating data **********************/ size_t elementsA = 1; for (auto mode : modeA) elementsA *= extent[mode]; size_t elementsB = 1; for (auto mode : modeB) elementsB *= extent[mode]; size_t elementsC = 1; for (auto mode : modeC) elementsC *= extent[mode]; size_t sizeA = sizeof(floatTypeA) * elementsA; size_t sizeB = sizeof(floatTypeB) * elementsB; size_t sizeC = sizeof(floatTypeC) * elementsC; printf("Total memory: %.2f GiB\n", (sizeA + sizeB + sizeC)/1024./1024./1024); void *A_d, *B_d, *C_d; HANDLE_CUDA_ERROR(hipMalloc((void**) &A_d, sizeA)); HANDLE_CUDA_ERROR(hipMalloc((void**) &B_d, sizeB)); HANDLE_CUDA_ERROR(hipMalloc((void**) &C_d, sizeC)); floatTypeA *A = (floatTypeA*) malloc(sizeof(floatTypeA) * elementsA); floatTypeB *B = (floatTypeB*) malloc(sizeof(floatTypeB) * elementsB); floatTypeC *C = (floatTypeC*) malloc(sizeof(floatTypeC) * elementsC); if (A == NULL || B == NULL || C == NULL) { printf("Error: Host allocation of A, B, or C.\n"); return -1; } /******************* * Initialize data *******************/ for (int64_t i = 0; i < elementsA; i++) A[i] = (((float) rand())/RAND_MAX - 0.5)*100; for (int64_t i = 0; i < elementsB; i++) B[i] = (((float) rand())/RAND_MAX - 0.5)*100; for (int64_t i = 0; i < elementsC; i++) C[i] = (((float) rand())/RAND_MAX - 
0.5)*100; HANDLE_CUDA_ERROR(hipMemcpy(A_d, A, sizeA, hipMemcpyHostToDevice)); HANDLE_CUDA_ERROR(hipMemcpy(B_d, B, sizeB, hipMemcpyHostToDevice)); HANDLE_CUDA_ERROR(hipMemcpy(C_d, C, sizeC, hipMemcpyHostToDevice)); /************************* * cuTENSOR *************************/ cutensorHandle_t handle; HANDLE_ERROR(cutensorInit(&handle)); /********************** * Create Tensor Descriptors **********************/ cutensorTensorDescriptor_t descA; HANDLE_ERROR(cutensorInitTensorDescriptor(&handle, &descA, nmodeA, extentA.data(), NULL /* stride */, typeA, CUTENSOR_OP_IDENTITY)); cutensorTensorDescriptor_t descB; HANDLE_ERROR(cutensorInitTensorDescriptor(&handle, &descB, nmodeB, extentB.data(), NULL /* stride */, typeB, CUTENSOR_OP_IDENTITY)); cutensorTensorDescriptor_t descC; HANDLE_ERROR(cutensorInitTensorDescriptor(&handle, &descC, nmodeC, extentC.data(), NULL /* stride */, typeC, CUTENSOR_OP_IDENTITY)); HANDLE_ERROR(cutensorContractionSimple(&handle, (void*)&alpha, A_d, &descA, modeA.data(), B_d, &descB, modeB.data(), (void*)&beta, C_d, &descC, modeC.data(), C_d, &descC, modeC.data(), typeCompute, CUTENSOR_ALGO_DEFAULT, CUTENSOR_WORKSPACE_RECOMMENDED, 0 /* stream */)); return 0; }
80085c6e18bbb45659cd9b9a03c105417222d52a.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - Neither the name(s) of the copyright holder(s) nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdlib.h> #include <stdio.h> #include <unordered_map> #include <vector> #include <cuda_runtime.h> #include <cutensor.h> #define HANDLE_ERROR(x) \ { const auto err = x; \ if( err != CUTENSOR_STATUS_SUCCESS ) \ { printf("Error: %s\n", cutensorGetErrorString(err)); return err; } \ }; #define HANDLE_CUDA_ERROR(x) \ { const auto err = x; \ if( err != cudaSuccess ) \ { printf("Error: %s\n", cudaGetErrorString(err)); return err; } \ }; /* This routine computes the tensor contraction \f[ D = alpha * A * B + beta * C \f] using the staged-API */ cutensorStatus_t cutensorContractionSimple(const cutensorHandle_t* handle, const void* alpha, const void *A, const cutensorTensorDescriptor_t* descA, const int32_t modeA[], const void *B, const cutensorTensorDescriptor_t* descB, const int32_t modeB[], const void* beta, const void *C, const cutensorTensorDescriptor_t* descC, const int32_t modeC[], void *D, const cutensorTensorDescriptor_t* descD, const int32_t modeD[], cutensorComputeType_t typeCompute, cutensorAlgo_t algo, cutensorWorksizePreference_t workPref, cudaStream_t stream) { /********************************************** * Retrieve the memory alignment for each tensor **********************************************/ uint32_t alignmentRequirementA; HANDLE_ERROR(cutensorGetAlignmentRequirement(handle, A, descA, &alignmentRequirementA)); uint32_t alignmentRequirementB; HANDLE_ERROR(cutensorGetAlignmentRequirement(handle, B, descB, &alignmentRequirementB)); uint32_t alignmentRequirementC; HANDLE_ERROR(cutensorGetAlignmentRequirement(handle, C, descC, &alignmentRequirementC)); uint32_t alignmentRequirementD; HANDLE_ERROR(cutensorGetAlignmentRequirement(handle, D, descD, &alignmentRequirementD)); /******************************* * Create Contraction Descriptor *******************************/ cutensorContractionDescriptor_t desc; HANDLE_ERROR(cutensorInitContractionDescriptor(handle, &desc, descA, modeA, alignmentRequirementA, descB, modeB, alignmentRequirementB, descC, modeC, 
alignmentRequirementC, descD, modeD, alignmentRequirementD, typeCompute)); /************************** * Set the algorithm to use ***************************/ cutensorContractionFind_t find; HANDLE_ERROR(cutensorInitContractionFind( handle, &find, algo)); /********************** * Query workspace **********************/ size_t worksize = 0; HANDLE_ERROR(cutensorContractionGetWorkspaceSize(handle, &desc, &find, workPref, &worksize)); void *work = nullptr; if (worksize > 0) { if(cudaSuccess != cudaMalloc(&work, worksize)) { work = nullptr; worksize = 0; } } /************************** * Create Contraction Plan **************************/ cutensorContractionPlan_t plan; HANDLE_ERROR(cutensorInitContractionPlan(handle, &plan, &desc, &find, worksize)); /********************** * Run **********************/ HANDLE_ERROR(cutensorContraction(handle, &plan, alpha, A, B, beta, C, D, work, worksize, stream)); return CUTENSOR_STATUS_SUCCESS; } int main() { typedef float floatTypeA; typedef float floatTypeB; typedef float floatTypeC; typedef float floatTypeCompute; cudaDataType_t typeA = CUDA_R_32F; cudaDataType_t typeB = CUDA_R_32F; cudaDataType_t typeC = CUDA_R_32F; cutensorComputeType_t typeCompute = CUTENSOR_COMPUTE_32F; floatTypeCompute alpha = (floatTypeCompute) 1.1f; floatTypeCompute beta = (floatTypeCompute) 0.f; /********************** * Computing: C_{m,u,n,v} = alpha * A_{m,h,k,n} B_{u,k,v,h} + beta * C_{m,u,n,v} **********************/ std::vector<int> modeC{'m','u','n','v'}; std::vector<int> modeA{'m','h','k','n'}; std::vector<int> modeB{'u','k','v','h'}; int nmodeA = modeA.size(); int nmodeB = modeB.size(); int nmodeC = modeC.size(); std::unordered_map<int, int64_t> extent; extent['m'] = 96; extent['n'] = 96; extent['u'] = 96; extent['v'] = 64; extent['h'] = 64; extent['k'] = 64; double gflops = (2.0 * extent['m'] * extent['n'] * extent['u'] * extent['v'] * extent['k'] * extent['h']) /1e9; std::vector<int64_t> extentC; for (auto mode : modeC) extentC.push_back(extent[mode]); std::vector<int64_t> extentA; for (auto mode : modeA) extentA.push_back(extent[mode]); std::vector<int64_t> extentB; for (auto mode : modeB) extentB.push_back(extent[mode]); /********************** * Allocating data **********************/ size_t elementsA = 1; for (auto mode : modeA) elementsA *= extent[mode]; size_t elementsB = 1; for (auto mode : modeB) elementsB *= extent[mode]; size_t elementsC = 1; for (auto mode : modeC) elementsC *= extent[mode]; size_t sizeA = sizeof(floatTypeA) * elementsA; size_t sizeB = sizeof(floatTypeB) * elementsB; size_t sizeC = sizeof(floatTypeC) * elementsC; printf("Total memory: %.2f GiB\n", (sizeA + sizeB + sizeC)/1024./1024./1024); void *A_d, *B_d, *C_d; HANDLE_CUDA_ERROR(cudaMalloc((void**) &A_d, sizeA)); HANDLE_CUDA_ERROR(cudaMalloc((void**) &B_d, sizeB)); HANDLE_CUDA_ERROR(cudaMalloc((void**) &C_d, sizeC)); floatTypeA *A = (floatTypeA*) malloc(sizeof(floatTypeA) * elementsA); floatTypeB *B = (floatTypeB*) malloc(sizeof(floatTypeB) * elementsB); floatTypeC *C = (floatTypeC*) malloc(sizeof(floatTypeC) * elementsC); if (A == NULL || B == NULL || C == NULL) { printf("Error: Host allocation of A, B, or C.\n"); return -1; } /******************* * Initialize data *******************/ for (int64_t i = 0; i < elementsA; i++) A[i] = (((float) rand())/RAND_MAX - 0.5)*100; for (int64_t i = 0; i < elementsB; i++) B[i] = (((float) rand())/RAND_MAX - 0.5)*100; for (int64_t i = 0; i < elementsC; i++) C[i] = (((float) rand())/RAND_MAX - 0.5)*100; HANDLE_CUDA_ERROR(cudaMemcpy(A_d, A, sizeA, 
cudaMemcpyHostToDevice)); HANDLE_CUDA_ERROR(cudaMemcpy(B_d, B, sizeB, cudaMemcpyHostToDevice)); HANDLE_CUDA_ERROR(cudaMemcpy(C_d, C, sizeC, cudaMemcpyHostToDevice)); /************************* * cuTENSOR *************************/ cutensorHandle_t handle; HANDLE_ERROR(cutensorInit(&handle)); /********************** * Create Tensor Descriptors **********************/ cutensorTensorDescriptor_t descA; HANDLE_ERROR(cutensorInitTensorDescriptor(&handle, &descA, nmodeA, extentA.data(), NULL /* stride */, typeA, CUTENSOR_OP_IDENTITY)); cutensorTensorDescriptor_t descB; HANDLE_ERROR(cutensorInitTensorDescriptor(&handle, &descB, nmodeB, extentB.data(), NULL /* stride */, typeB, CUTENSOR_OP_IDENTITY)); cutensorTensorDescriptor_t descC; HANDLE_ERROR(cutensorInitTensorDescriptor(&handle, &descC, nmodeC, extentC.data(), NULL /* stride */, typeC, CUTENSOR_OP_IDENTITY)); HANDLE_ERROR(cutensorContractionSimple(&handle, (void*)&alpha, A_d, &descA, modeA.data(), B_d, &descB, modeB.data(), (void*)&beta, C_d, &descC, modeC.data(), C_d, &descC, modeC.data(), typeCompute, CUTENSOR_ALGO_DEFAULT, CUTENSOR_WORKSPACE_RECOMMENDED, 0 /* stream */)); return 0; }
d35c4b65040c806d1df02a1f531f4aae14fc6bef.hip
// !!! This is a file automatically generated by hipify!!! #include "chainerx/cuda/cuda_device.h" #include <cstdint> #include <hip/hip_runtime.h> #include "chainerx/array.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/cuda/op_regist.h" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/routines/creation.h" #include "chainerx/routines/misc.h" namespace chainerx { namespace cuda { namespace { template <typename T> struct CopyImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType a, CudaType& out) { out = a; } }; class CudaCopyOp : public CopyOp { public: void Call(const Array& a, const Array& out) override { Device& device = a.device(); device.CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(CopyImpl<T>{}, a, out); }); } }; CHAINERX_REGISTER_OP_CUDA(CopyOp, CudaCopyOp); template <typename InT, typename OutT> struct AsTypeImpl { using InCudaType = cuda_internal::DataType<InT>; using OutCudaType = cuda_internal::DataType<OutT>; __device__ void operator()(int64_t /*i*/, InCudaType a, OutCudaType& out) { out = static_cast<OutCudaType>(a); } }; class CudaAsTypeOp : public AsTypeOp { public: void Call(const Array& a, const Array& out) override { Device& device = a.device(); device.CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{device.index()}; auto do_astype = [&](auto in_pt, auto out_pt) { using InT = typename decltype(in_pt)::type; using OutT = typename decltype(out_pt)::type; Elementwise<const InT, OutT>(AsTypeImpl<InT, OutT>{}, a, out); }; VisitDtype(out.dtype(), [&](auto out_pt) { VisitDtype(a.dtype(), do_astype, out_pt); }); } }; CHAINERX_REGISTER_OP_CUDA(AsTypeOp, CudaAsTypeOp); } // namespace } // namespace cuda } // namespace chainerx
d35c4b65040c806d1df02a1f531f4aae14fc6bef.cu
#include "chainerx/cuda/cuda_device.h" #include <cstdint> #include <cuda_runtime.h> #include "chainerx/array.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/cuda/op_regist.h" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/routines/creation.h" #include "chainerx/routines/misc.h" namespace chainerx { namespace cuda { namespace { template <typename T> struct CopyImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType a, CudaType& out) { out = a; } }; class CudaCopyOp : public CopyOp { public: void Call(const Array& a, const Array& out) override { Device& device = a.device(); device.CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(CopyImpl<T>{}, a, out); }); } }; CHAINERX_REGISTER_OP_CUDA(CopyOp, CudaCopyOp); template <typename InT, typename OutT> struct AsTypeImpl { using InCudaType = cuda_internal::DataType<InT>; using OutCudaType = cuda_internal::DataType<OutT>; __device__ void operator()(int64_t /*i*/, InCudaType a, OutCudaType& out) { out = static_cast<OutCudaType>(a); } }; class CudaAsTypeOp : public AsTypeOp { public: void Call(const Array& a, const Array& out) override { Device& device = a.device(); device.CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{device.index()}; auto do_astype = [&](auto in_pt, auto out_pt) { using InT = typename decltype(in_pt)::type; using OutT = typename decltype(out_pt)::type; Elementwise<const InT, OutT>(AsTypeImpl<InT, OutT>{}, a, out); }; VisitDtype(out.dtype(), [&](auto out_pt) { VisitDtype(a.dtype(), do_astype, out_pt); }); } }; CHAINERX_REGISTER_OP_CUDA(AsTypeOp, CudaAsTypeOp); } // namespace } // namespace cuda } // namespace chainerx
6c1f23dbd6ee6f79b63355c7c77a42ef00f91bdb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define PRECISION_z __global__ void magma_zparic_csr_kernel( magma_int_t n, magma_int_t nnz, magma_index_t *Arowidx, magma_index_t *Acolidx, const magmaDoubleComplex * __restrict__ A_val, magma_index_t *rowptr, magma_index_t *colidx, magmaDoubleComplex *val ) { int i, j; int k = (blockDim.x * blockIdx.x + threadIdx.x); // % nnz; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex s, sp; int il, iu, jl, ju; if ( k < nnz ) { i = Arowidx[k]; j = Acolidx[k]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) s = __ldg( A_val+k ); #else s = A_val[k]; #endif il = rowptr[i]; iu = rowptr[j]; while (il < rowptr[i+1] && iu < rowptr[j+1]) { sp = zero; jl = colidx[il]; ju = colidx[iu]; if (jl < ju) il++; else if (ju < jl) iu++; else { // we are going to modify this u entry sp = val[il] * val[iu]; s -= sp; il++; iu++; } } s += sp; // undo the last operation (it must be the last) // modify entry if (i == j) // diagonal val[il-1] = MAGMA_Z_MAKE( sqrt( fabs( MAGMA_Z_REAL(s) )), 0.0 ); else //sub-diagonal val[il-1] = s / val[iu-1]; } }// kernel /** Purpose ------- This routine iteratively computes an incomplete LU factorization. For reference, see: E. Chow and A. Patel: "Fine-grained Parallel Incomplete LU Factorization", SIAM Journal on Scientific Computing, 37, C169-C193 (2015). This routine was used in the ISC 2015 paper: E. Chow et al.: "Asynchronous Iterative Algorithm for Computing Incomplete Factorizations on GPUs", ISC High Performance 2015, LNCS 9137, pp. 1-16, 2015. The input format of the initial guess matrix A is Magma_CSRCOO, A_CSR is CSR or CSRCOO format. Arguments --------- @param[in] A magma_z_matrix input matrix A - initial guess (lower triangular) @param[in,out] A_CSR magma_z_matrix input/output matrix containing the IC approximation @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zparic_csr( magma_z_matrix A, magma_z_matrix A_CSR, magma_queue_t queue ) { int blocksize1 = 128; int blocksize2 = 1; int dimgrid1 = magma_ceildiv( A.nnz, blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_zparic_csr_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() , A.num_rows, A.nnz, A.rowidx, A.col, A.val, A_CSR.row, A_CSR.col, A_CSR.val ); return MAGMA_SUCCESS; }
6c1f23dbd6ee6f79b63355c7c77a42ef00f91bdb.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define PRECISION_z __global__ void magma_zparic_csr_kernel( magma_int_t n, magma_int_t nnz, magma_index_t *Arowidx, magma_index_t *Acolidx, const magmaDoubleComplex * __restrict__ A_val, magma_index_t *rowptr, magma_index_t *colidx, magmaDoubleComplex *val ) { int i, j; int k = (blockDim.x * blockIdx.x + threadIdx.x); // % nnz; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex s, sp; int il, iu, jl, ju; if ( k < nnz ) { i = Arowidx[k]; j = Acolidx[k]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) s = __ldg( A_val+k ); #else s = A_val[k]; #endif il = rowptr[i]; iu = rowptr[j]; while (il < rowptr[i+1] && iu < rowptr[j+1]) { sp = zero; jl = colidx[il]; ju = colidx[iu]; if (jl < ju) il++; else if (ju < jl) iu++; else { // we are going to modify this u entry sp = val[il] * val[iu]; s -= sp; il++; iu++; } } s += sp; // undo the last operation (it must be the last) // modify entry if (i == j) // diagonal val[il-1] = MAGMA_Z_MAKE( sqrt( fabs( MAGMA_Z_REAL(s) )), 0.0 ); else //sub-diagonal val[il-1] = s / val[iu-1]; } }// kernel /** Purpose ------- This routine iteratively computes an incomplete LU factorization. For reference, see: E. Chow and A. Patel: "Fine-grained Parallel Incomplete LU Factorization", SIAM Journal on Scientific Computing, 37, C169-C193 (2015). This routine was used in the ISC 2015 paper: E. Chow et al.: "Asynchronous Iterative Algorithm for Computing Incomplete Factorizations on GPUs", ISC High Performance 2015, LNCS 9137, pp. 1-16, 2015. The input format of the initial guess matrix A is Magma_CSRCOO, A_CSR is CSR or CSRCOO format. Arguments --------- @param[in] A magma_z_matrix input matrix A - initial guess (lower triangular) @param[in,out] A_CSR magma_z_matrix input/output matrix containing the IC approximation @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zparic_csr( magma_z_matrix A, magma_z_matrix A_CSR, magma_queue_t queue ) { int blocksize1 = 128; int blocksize2 = 1; int dimgrid1 = magma_ceildiv( A.nnz, blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_zparic_csr_kernel<<< grid, block, 0, queue->cuda_stream() >>> ( A.num_rows, A.nnz, A.rowidx, A.col, A.val, A_CSR.row, A_CSR.col, A_CSR.val ); return MAGMA_SUCCESS; }
428344411ebaee1bfa5ea7af7d03066cfa1c5183.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*codigo CUDA utilizando memoria global*/ #include "ApplySmooth_CUDA.h" __global__ void smooth_cuda(unsigned short* cuda_image, unsigned short* new_cuda_image, int rows, int cols){ int coordX = (blockIdx.x*blockDim.x)+threadIdx.x; int coordY = (blockIdx.y*blockDim.y)+threadIdx.y; int n = rows*cols; int count = 0; unsigned int sum = 0; for(int i=max(0, coordX-2); i<=min(n-1, coordX+2); i++){ for(int j=max(0, coordY-2); j<=min(n-1, coordY+2); j++){ sum += cuda_image[coord(i, j)]; count++; } } if (count > 0) new_cuda_image[coord(coordX, coordY)] = sum/count; } void smooth(unsigned short *image, int rows, int cols){ unsigned short* cuda_image; unsigned short* new_cuda_image; hipMalloc(&cuda_image, rows*cols*sizeof(unsigned short)); hipMalloc(&new_cuda_image, rows*cols*sizeof(unsigned short)); hipMemcpy(cuda_image, image, rows*cols*sizeof(unsigned short), hipMemcpyHostToDevice); dim3 threadsPerBlock(8,4); dim3 numBlocks(rows/threadsPerBlock.x, cols/threadsPerBlock.y); hipLaunchKernelGGL(( smooth_cuda), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, cuda_image, new_cuda_image, rows, cols); hipError_t cuda_error = hipGetLastError(); if (cuda_error != hipSuccess) printf("Cuda Error: %s\n", hipGetErrorString(cuda_error)); hipMemcpy(image, new_cuda_image, rows*cols*sizeof(unsigned short), hipMemcpyDeviceToHost); hipFree(cuda_image); hipFree(new_cuda_image); return; }
428344411ebaee1bfa5ea7af7d03066cfa1c5183.cu
/*codigo CUDA utilizando memoria global*/ #include "ApplySmooth_CUDA.h" __global__ void smooth_cuda(unsigned short* cuda_image, unsigned short* new_cuda_image, int rows, int cols){ int coordX = (blockIdx.x*blockDim.x)+threadIdx.x; int coordY = (blockIdx.y*blockDim.y)+threadIdx.y; int n = rows*cols; int count = 0; unsigned int sum = 0; for(int i=max(0, coordX-2); i<=min(n-1, coordX+2); i++){ for(int j=max(0, coordY-2); j<=min(n-1, coordY+2); j++){ sum += cuda_image[coord(i, j)]; count++; } } if (count > 0) new_cuda_image[coord(coordX, coordY)] = sum/count; } void smooth(unsigned short *image, int rows, int cols){ unsigned short* cuda_image; unsigned short* new_cuda_image; cudaMalloc(&cuda_image, rows*cols*sizeof(unsigned short)); cudaMalloc(&new_cuda_image, rows*cols*sizeof(unsigned short)); cudaMemcpy(cuda_image, image, rows*cols*sizeof(unsigned short), cudaMemcpyHostToDevice); dim3 threadsPerBlock(8,4); dim3 numBlocks(rows/threadsPerBlock.x, cols/threadsPerBlock.y); smooth_cuda<<<numBlocks, threadsPerBlock>>>(cuda_image, new_cuda_image, rows, cols); cudaError_t cuda_error = cudaGetLastError(); if (cuda_error != cudaSuccess) printf("Cuda Error: %s\n", cudaGetErrorString(cuda_error)); cudaMemcpy(image, new_cuda_image, rows*cols*sizeof(unsigned short), cudaMemcpyDeviceToHost); cudaFree(cuda_image); cudaFree(new_cuda_image); return; }
17bfa1a9ef711f33c32d117f860a4aed0ca2f20b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ /* * Kernel of dense matrix-matrix multiplication kernel. */ #define CHECK_ERROR(errorMessage) { \ hipError_t err = hipGetLastError(); \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \ errorMessage, __FILE__, __LINE__, hipGetErrorString( err) );\ exit(EXIT_FAILURE); \ } \ } // Parameters of tile sizes #define TILE_SZ 16 __global__ void mysgemmNT( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta ) { float c = 0.0f; int m = blockIdx.x * blockDim.x + threadIdx.x; int n = blockIdx.y * blockDim.y + threadIdx.y; for (int i = 0; i < k; ++i) { float a = A[m + i * lda]; float b = B[n + i * ldb]; c += a * b; } C[m+n*ldc] = C[m+n*ldc] * beta + alpha * c; } void basicSgemm( char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc ) { if ((transa != 'N') && (transa != 'n')) { std::cerr << "unsupported value of 'transa' in regtileSgemm()" << std::endl; return; } if ((transb != 'T') && (transb != 't')) { std::cerr << "unsupported value of 'transb' in regtileSgemm()" << std::endl; return; } // In this code we assume the matrix sizes are multiple of tile size if ((m%TILE_SZ) || (n%TILE_SZ)) { std::cerr << "unsupported size of matrix. m should be multiple of " << TILE_SZ << "; n should be multiple of " << TILE_SZ << std::endl; } dim3 grid( m/TILE_SZ, n/TILE_SZ ), threads( TILE_SZ, TILE_SZ ); hipLaunchKernelGGL(( mysgemmNT), dim3(grid), dim3(threads), 0, 0, A, lda, B, ldb, C, ldc, k, alpha, beta); CHECK_ERROR("mySgemm"); }
17bfa1a9ef711f33c32d117f860a4aed0ca2f20b.cu
/*************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ /* * Kernel of dense matrix-matrix multiplication kernel. */ #define CHECK_ERROR(errorMessage) { \ cudaError_t err = cudaGetLastError(); \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \ errorMessage, __FILE__, __LINE__, cudaGetErrorString( err) );\ exit(EXIT_FAILURE); \ } \ } // Parameters of tile sizes #define TILE_SZ 16 __global__ void mysgemmNT( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta ) { float c = 0.0f; int m = blockIdx.x * blockDim.x + threadIdx.x; int n = blockIdx.y * blockDim.y + threadIdx.y; for (int i = 0; i < k; ++i) { float a = A[m + i * lda]; float b = B[n + i * ldb]; c += a * b; } C[m+n*ldc] = C[m+n*ldc] * beta + alpha * c; } void basicSgemm( char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc ) { if ((transa != 'N') && (transa != 'n')) { std::cerr << "unsupported value of 'transa' in regtileSgemm()" << std::endl; return; } if ((transb != 'T') && (transb != 't')) { std::cerr << "unsupported value of 'transb' in regtileSgemm()" << std::endl; return; } // In this code we assume the matrix sizes are multiple of tile size if ((m%TILE_SZ) || (n%TILE_SZ)) { std::cerr << "unsupported size of matrix. m should be multiple of " << TILE_SZ << "; n should be multiple of " << TILE_SZ << std::endl; } dim3 grid( m/TILE_SZ, n/TILE_SZ ), threads( TILE_SZ, TILE_SZ ); mysgemmNT<<<grid, threads>>>( A, lda, B, ldb, C, ldc, k, alpha, beta); CHECK_ERROR("mySgemm"); }
03a13d302dfd2ccb7e2a493b0656d27523727cea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/utility.hpp" #include "opencv2/core/cuda/reduce.hpp" #include "opencv2/core/cuda/limits.hpp" #include "opencv2/core/cuda/vec_distance.hpp" #include "opencv2/core/cuda/datamov_utils.hpp" #include "opencv2/core/cuda/warp_shuffle.hpp" namespace cv { namespace cuda { namespace device { namespace bf_knnmatch { /////////////////////////////////////////////////////////////////////////////// // Reduction template <int BLOCK_SIZE> __device__ void findBestMatch(float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, float* s_distance, int* s_trainIdx) { #if __CUDA_ARCH__ >= 300 CV_UNUSED(s_distance); CV_UNUSED(s_trainIdx); float d1, d2; int i1, i2; #pragma unroll for (int i = BLOCK_SIZE / 2; i >= 1; i /= 2) { d1 = shfl_down(bestDistance1, i, BLOCK_SIZE); d2 = shfl_down(bestDistance2, i, BLOCK_SIZE); i1 = shfl_down(bestTrainIdx1, i, BLOCK_SIZE); i2 = shfl_down(bestTrainIdx2, i, BLOCK_SIZE); if (bestDistance1 < d1) { if (d1 < bestDistance2) { bestDistance2 = d1; bestTrainIdx2 = i1; } } else { bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestDistance1 = d1; bestTrainIdx1 = i1; if (d2 < bestDistance2) { bestDistance2 = d2; bestTrainIdx2 = i2; } } } #else float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; s_distance += threadIdx.y * BLOCK_SIZE; s_trainIdx += threadIdx.y * BLOCK_SIZE; s_distance[threadIdx.x] = bestDistance1; s_trainIdx[threadIdx.x] = bestTrainIdx1; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance1) { myBestDistance2 = myBestDistance1; myBestTrainIdx2 = myBestTrainIdx1; myBestDistance1 = val; myBestTrainIdx1 = s_trainIdx[i]; } else if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; } } } __syncthreads(); s_distance[threadIdx.x] = bestDistance2; s_trainIdx[threadIdx.x] = bestTrainIdx2; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; } } } bestDistance1 = myBestDistance1; bestDistance2 = myBestDistance2; bestTrainIdx1 = myBestTrainIdx1; bestTrainIdx2 = myBestTrainIdx2; #endif } template <int BLOCK_SIZE> __device__ void findBestMatch(float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2, float* s_distance, int* s_trainIdx, int* s_imgIdx) { #if __CUDA_ARCH__ >= 300 CV_UNUSED(s_distance); CV_UNUSED(s_trainIdx); CV_UNUSED(s_imgIdx); float d1, d2; int i1, i2; int j1, j2; #pragma unroll for (int i = BLOCK_SIZE / 2; i >= 1; i /= 2) { d1 = shfl_down(bestDistance1, i, BLOCK_SIZE); d2 = shfl_down(bestDistance2, i, BLOCK_SIZE); i1 = shfl_down(bestTrainIdx1, i, BLOCK_SIZE); i2 = shfl_down(bestTrainIdx2, i, BLOCK_SIZE); j1 = shfl_down(bestImgIdx1, i, BLOCK_SIZE); j2 = shfl_down(bestImgIdx2, i, BLOCK_SIZE); if (bestDistance1 < d1) { if (d1 < bestDistance2) { bestDistance2 = d1; bestTrainIdx2 = i1; bestImgIdx2 = j1; } } else { bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx2 = bestImgIdx1; bestDistance1 = d1; bestTrainIdx1 = i1; bestImgIdx1 = j1; if (d2 < bestDistance2) { bestDistance2 = d2; bestTrainIdx2 = i2; bestImgIdx2 = j2; } } } #else float myBestDistance1 = 
numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; s_distance += threadIdx.y * BLOCK_SIZE; s_trainIdx += threadIdx.y * BLOCK_SIZE; s_imgIdx += threadIdx.y * BLOCK_SIZE; s_distance[threadIdx.x] = bestDistance1; s_trainIdx[threadIdx.x] = bestTrainIdx1; s_imgIdx[threadIdx.x] = bestImgIdx1; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance1) { myBestDistance2 = myBestDistance1; myBestTrainIdx2 = myBestTrainIdx1; myBestImgIdx2 = myBestImgIdx1; myBestDistance1 = val; myBestTrainIdx1 = s_trainIdx[i]; myBestImgIdx1 = s_imgIdx[i]; } else if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; myBestImgIdx2 = s_imgIdx[i]; } } } __syncthreads(); s_distance[threadIdx.x] = bestDistance2; s_trainIdx[threadIdx.x] = bestTrainIdx2; s_imgIdx[threadIdx.x] = bestImgIdx2; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; myBestImgIdx2 = s_imgIdx[i]; } } } bestDistance1 = myBestDistance1; bestDistance2 = myBestDistance2; bestTrainIdx1 = myBestTrainIdx1; bestTrainIdx2 = myBestTrainIdx2; bestImgIdx1 = myBestImgIdx1; bestImgIdx2 = myBestImgIdx2; #endif } /////////////////////////////////////////////////////////////////////////////// // Match Unrolled Cached template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U> __device__ void loadQueryToSmem(int queryIdx, const PtrStepSz<T>& query, U* s_query) { #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? 
query.ptr(::min(queryIdx, query.rows - 1))[loadX] : 0; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __device__ void loopUnrolledCached(int queryIdx, const PtrStepSz<T>& query, int imgIdx, const PtrStepSz<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < train.cols) { T val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx)) { if (distVal < bestDistance1) { bestImgIdx2 = bestImgIdx1; bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx1 = imgIdx; bestDistance1 = distVal; bestTrainIdx1 = trainIdx; } else if (distVal < bestDistance2) { bestImgIdx2 = imgIdx; bestDistance2 = distVal; bestTrainIdx2 = trainIdx; } } } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN); loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<float2>& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? 
MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN); loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const PtrStepSz<T> train = trains[imgIdx]; m.next(); loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<int2>& imgIdx, const PtrStepSz<float2>& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? 
MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Match Unrolled template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __device__ void loopUnrolled(int queryIdx, const PtrStepSz<T>& query, int imgIdx, const PtrStepSz<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < query.cols) { T val; ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val); s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx)) { if (distVal < bestDistance1) { bestImgIdx2 = bestImgIdx1; bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx1 = imgIdx; bestDistance1 = distVal; bestTrainIdx1 = trainIdx; } else if (distVal < bestDistance2) { bestImgIdx2 = imgIdx; bestDistance2 = distVal; bestTrainIdx2 = trainIdx; } } } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolled(const 
PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<float2>& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const PtrStepSz<T> train = trains[imgIdx]; m.next(); loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<int2>& imgIdx, const PtrStepSz<float2>& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Match template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __device__ void loop(int queryIdx, const PtrStepSz<T>& query, int imgIdx, const PtrStepSz<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; for (int i = 0, endi = (query.cols + BLOCK_SIZE - 
1) / BLOCK_SIZE; i < endi; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < query.cols) { T val; ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val); s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx)) { if (distVal < bestDistance1) { bestImgIdx2 = bestImgIdx1; bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx1 = imgIdx; bestDistance1 = distVal; bestTrainIdx1 = trainIdx; } else if (distVal < bestDistance2) { bestImgIdx2 = imgIdx; bestDistance2 = distVal; bestTrainIdx2 = trainIdx; } } } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void match(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void match(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<float2>& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( match<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void match(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = 
numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const PtrStepSz<T> train = trains[imgIdx]; m.next(); loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void match(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<int2>& imgIdx, const PtrStepSz<float2>& distance, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( match<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // knnMatch 2 dispatcher template <typename Dist, typename T, typename Mask> void match2Dispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, hipStream_t stream) { if (query.cols <= 64) { matchUnrolledCached<16, 64, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 128) { matchUnrolledCached<16, 128, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); } /*else if (query.cols <= 256) { matchUnrolled<16, 256, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 512) { matchUnrolled<16, 512, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 1024) { matchUnrolled<16, 1024, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); }*/ else { match<16, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); } } template <typename Dist, typename T, typename Mask> void match2Dispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream) { if (query.cols <= 64) { matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), 
static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 128) { matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); } /*else if (query.cols <= 256) { matchUnrolled<16, 256, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 512) { matchUnrolled<16, 512, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 1024) { matchUnrolled<16, 1024, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); }*/ else { match<16, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); } } /////////////////////////////////////////////////////////////////////////////// // Calc distance kernel template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void calcDistanceUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, PtrStepf allDist) { extern __shared__ int smem[]; const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y; const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; if (loadX < query.cols) { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(::min(queryIdx, query.rows - 1))[loadX]; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX]; } else { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } if (queryIdx < query.rows && trainIdx < train.rows) { float distVal = numeric_limits<float>::max(); if (mask(queryIdx, trainIdx)) distVal = (typename Dist::result_type)dist; allDist.ptr(queryIdx)[trainIdx] = distVal; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void calcDistanceUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzf& allDist, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( calcDistanceUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, allDist); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void calcDistance(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, PtrStepf allDist) { extern __shared__ 
int smem[]; const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y; const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); Dist dist; for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; if (loadX < query.cols) { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(::min(queryIdx, query.rows - 1))[loadX]; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX]; } else { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } if (queryIdx < query.rows && trainIdx < train.rows) { float distVal = numeric_limits<float>::max(); if (mask(queryIdx, trainIdx)) distVal = (typename Dist::result_type)dist; allDist.ptr(queryIdx)[trainIdx] = distVal; } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void calcDistance(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzf& allDist, hipStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); hipLaunchKernelGGL(( calcDistance<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, allDist); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Calc Distance dispatcher template <typename Dist, typename T, typename Mask> void calcDistanceDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzf& allDist, hipStream_t stream) { if (query.cols <= 64) { calcDistanceUnrolled<16, 64, Dist>(query, train, mask, allDist, stream); } else if (query.cols <= 128) { calcDistanceUnrolled<16, 128, Dist>(query, train, mask, allDist, stream); } /*else if (query.cols <= 256) { calcDistanceUnrolled<16, 256, Dist>(query, train, mask, allDist, stream); } else if (query.cols <= 512) { calcDistanceUnrolled<16, 512, Dist>(query, train, mask, allDist, stream); } else if (query.cols <= 1024) { calcDistanceUnrolled<16, 1024, Dist>(query, train, mask, allDist, stream); }*/ else { calcDistance<16, Dist>(query, train, mask, allDist, stream); } } /////////////////////////////////////////////////////////////////////////////// // find knn match kernel template <int BLOCK_SIZE> __global__ void findBestMatch(PtrStepSzf allDist, int i, PtrStepi trainIdx, PtrStepf distance) { const int SMEM_SIZE = BLOCK_SIZE > 64 ? 
BLOCK_SIZE : 64; __shared__ float s_dist[SMEM_SIZE]; __shared__ int s_trainIdx[SMEM_SIZE]; const int queryIdx = blockIdx.x; float* allDistRow = allDist.ptr(queryIdx); float dist = numeric_limits<float>::max(); int bestIdx = -1; for (int i = threadIdx.x; i < allDist.cols; i += BLOCK_SIZE) { float reg = allDistRow[i]; if (reg < dist) { dist = reg; bestIdx = i; } } s_dist[threadIdx.x] = dist; s_trainIdx[threadIdx.x] = bestIdx; __syncthreads(); reduceKeyVal<BLOCK_SIZE>(s_dist, dist, s_trainIdx, bestIdx, threadIdx.x, less<float>()); if (threadIdx.x == 0) { if (dist < numeric_limits<float>::max()) { allDistRow[bestIdx] = numeric_limits<float>::max(); trainIdx.ptr(queryIdx)[i] = bestIdx; distance.ptr(queryIdx)[i] = dist; } } } template <int BLOCK_SIZE> void findKnnMatch(int k, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, const PtrStepSzf& allDist, hipStream_t stream) { const dim3 block(BLOCK_SIZE, 1, 1); const dim3 grid(trainIdx.rows, 1, 1); for (int i = 0; i < k; ++i) { hipLaunchKernelGGL(( findBestMatch<BLOCK_SIZE>), dim3(grid), dim3(block), 0, stream, allDist, i, trainIdx, distance); cudaSafeCall( hipGetLastError() ); } if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } void findKnnMatchDispatcher(int k, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream) { findKnnMatch<256>(k, static_cast<PtrStepSzi>(trainIdx), static_cast<PtrStepSzf>(distance), allDist, stream); } /////////////////////////////////////////////////////////////////////////////// // knn match Dispatcher template <typename Dist, typename T, typename Mask> void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, int k, const Mask& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream) { if (k == 2) { match2Dispatcher<Dist>(query, train, mask, trainIdx, distance, stream); } else { calcDistanceDispatcher<Dist>(query, train, mask, allDist, stream); findKnnMatchDispatcher(k, trainIdx, distance, allDist, stream); } } /////////////////////////////////////////////////////////////////////////////// // knn match caller template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream) { if (mask.data) matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, stream); else matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, stream); } template void matchL1_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); //template void matchL1_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); template void matchL1_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); template void matchL1_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, 
const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); template void matchL1_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); template void matchL1_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream) { if (mask.data) matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, stream); else matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, stream); } //template void matchL2_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); //template void matchL2_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); //template void matchL2_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); //template void matchL2_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); //template void matchL2_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); template void matchL2_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream) { if (mask.data) matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, stream); else matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, stream); } template void matchHamming_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); //template void matchHamming_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); template void matchHamming_gpu<ushort>(const 
PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); //template void matchHamming_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); template void matchHamming_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, hipStream_t stream); template <typename T> void match2L1_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream) { if (masks.data) match2Dispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, stream); else match2Dispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, stream); } template void match2L1_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); //template void match2L1_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); template void match2L1_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); template void match2L1_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); template void match2L1_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); template void match2L1_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); template <typename T> void match2L2_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream) { if (masks.data) match2Dispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, stream); else match2Dispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, stream); } //template void match2L2_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); //template void match2L2_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, 
const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); //template void match2L2_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); //template void match2L2_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); //template void match2L2_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzb& distance, hipStream_t stream); template void match2L2_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); template <typename T> void match2Hamming_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream) { if (masks.data) match2Dispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, stream); else match2Dispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, stream); } template void match2Hamming_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); //template void match2Hamming_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); template void match2Hamming_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); //template void match2Hamming_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); template void match2Hamming_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, hipStream_t stream); } // namespace bf_knnmatch }}} // namespace cv { namespace cuda { namespace cudev { #endif /* CUDA_DISABLER */
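For reference, a minimal standalone sketch (not taken from either file in this pair; the kernel name `scale` and its arguments are illustrative placeholders) of the launch and error-check pattern used throughout the hipified matchers above: CUDA's `kernel<<<grid, block, smem, stream>>>(...)` is rewritten by hipify as `hipLaunchKernelGGL(kernel, grid, block, smem, stream, ...)`, `cudaGetLastError`/`cudaDeviceSynchronize` become their `hip*` counterparts, and the caller synchronizes only when running on the default (0) stream.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale(float* data, float alpha, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= alpha;
}

int main()
{
    const int n = 256;
    float* d_data = 0;
    hipMalloc((void**)&d_data, n * sizeof(float));
    hipMemset(d_data, 0, n * sizeof(float));

    const dim3 block(128);
    const dim3 grid((n + block.x - 1) / block.x);
    hipStream_t stream = 0; // default stream, as in the synchronous paths above

    // CUDA form of the same launch: scale<<<grid, block, 0, stream>>>(d_data, 2.0f, n);
    hipLaunchKernelGGL(scale, grid, block, 0, stream, d_data, 2.0f, n);

    // Post-launch checks mirroring the matchers: verify the launch, then
    // synchronize only when the work was enqueued on the default stream.
    if (hipGetLastError() != hipSuccess)
        std::printf("kernel launch failed\n");
    if (stream == 0)
        hipDeviceSynchronize();

    hipFree(d_data);
    return 0;
}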
03a13d302dfd2ccb7e2a493b0656d27523727cea.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/utility.hpp" #include "opencv2/core/cuda/reduce.hpp" #include "opencv2/core/cuda/limits.hpp" #include "opencv2/core/cuda/vec_distance.hpp" #include "opencv2/core/cuda/datamov_utils.hpp" #include "opencv2/core/cuda/warp_shuffle.hpp" namespace cv { namespace cuda { namespace device { namespace bf_knnmatch { /////////////////////////////////////////////////////////////////////////////// // Reduction template <int BLOCK_SIZE> __device__ void findBestMatch(float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, float* s_distance, int* s_trainIdx) { #if __CUDA_ARCH__ >= 300 CV_UNUSED(s_distance); CV_UNUSED(s_trainIdx); float d1, d2; int i1, i2; #pragma unroll for (int i = BLOCK_SIZE / 2; i >= 1; i /= 2) { d1 = shfl_down(bestDistance1, i, BLOCK_SIZE); d2 = shfl_down(bestDistance2, i, BLOCK_SIZE); i1 = shfl_down(bestTrainIdx1, i, BLOCK_SIZE); i2 = shfl_down(bestTrainIdx2, i, BLOCK_SIZE); if (bestDistance1 < d1) { if (d1 < bestDistance2) { bestDistance2 = d1; bestTrainIdx2 = i1; } } else { bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestDistance1 = d1; bestTrainIdx1 = i1; if (d2 < bestDistance2) { bestDistance2 = d2; bestTrainIdx2 = i2; } } } #else float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; s_distance += threadIdx.y * BLOCK_SIZE; s_trainIdx += threadIdx.y * BLOCK_SIZE; s_distance[threadIdx.x] = bestDistance1; s_trainIdx[threadIdx.x] = bestTrainIdx1; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance1) { myBestDistance2 = myBestDistance1; myBestTrainIdx2 = myBestTrainIdx1; myBestDistance1 = val; myBestTrainIdx1 = s_trainIdx[i]; } else if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; } } } __syncthreads(); s_distance[threadIdx.x] = bestDistance2; s_trainIdx[threadIdx.x] = bestTrainIdx2; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; } } } bestDistance1 = myBestDistance1; bestDistance2 = myBestDistance2; bestTrainIdx1 = myBestTrainIdx1; bestTrainIdx2 = myBestTrainIdx2; #endif } template <int BLOCK_SIZE> __device__ void findBestMatch(float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2, float* s_distance, int* s_trainIdx, int* s_imgIdx) { #if __CUDA_ARCH__ >= 300 CV_UNUSED(s_distance); CV_UNUSED(s_trainIdx); CV_UNUSED(s_imgIdx); float d1, d2; int i1, i2; int j1, j2; #pragma unroll for (int i = BLOCK_SIZE / 2; i >= 1; i /= 2) { d1 = shfl_down(bestDistance1, i, BLOCK_SIZE); d2 = shfl_down(bestDistance2, i, BLOCK_SIZE); i1 = shfl_down(bestTrainIdx1, i, BLOCK_SIZE); i2 = shfl_down(bestTrainIdx2, i, BLOCK_SIZE); j1 = shfl_down(bestImgIdx1, i, BLOCK_SIZE); j2 = shfl_down(bestImgIdx2, i, BLOCK_SIZE); if (bestDistance1 < d1) { if (d1 < bestDistance2) { bestDistance2 = d1; bestTrainIdx2 = i1; bestImgIdx2 = j1; } } else { bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx2 = bestImgIdx1; bestDistance1 = d1; bestTrainIdx1 = i1; bestImgIdx1 = j1; if (d2 < bestDistance2) { bestDistance2 = d2; bestTrainIdx2 = i2; bestImgIdx2 = j2; } } } #else float myBestDistance1 = 
numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; s_distance += threadIdx.y * BLOCK_SIZE; s_trainIdx += threadIdx.y * BLOCK_SIZE; s_imgIdx += threadIdx.y * BLOCK_SIZE; s_distance[threadIdx.x] = bestDistance1; s_trainIdx[threadIdx.x] = bestTrainIdx1; s_imgIdx[threadIdx.x] = bestImgIdx1; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance1) { myBestDistance2 = myBestDistance1; myBestTrainIdx2 = myBestTrainIdx1; myBestImgIdx2 = myBestImgIdx1; myBestDistance1 = val; myBestTrainIdx1 = s_trainIdx[i]; myBestImgIdx1 = s_imgIdx[i]; } else if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; myBestImgIdx2 = s_imgIdx[i]; } } } __syncthreads(); s_distance[threadIdx.x] = bestDistance2; s_trainIdx[threadIdx.x] = bestTrainIdx2; s_imgIdx[threadIdx.x] = bestImgIdx2; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; myBestImgIdx2 = s_imgIdx[i]; } } } bestDistance1 = myBestDistance1; bestDistance2 = myBestDistance2; bestTrainIdx1 = myBestTrainIdx1; bestTrainIdx2 = myBestTrainIdx2; bestImgIdx1 = myBestImgIdx1; bestImgIdx2 = myBestImgIdx2; #endif } /////////////////////////////////////////////////////////////////////////////// // Match Unrolled Cached template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U> __device__ void loadQueryToSmem(int queryIdx, const PtrStepSz<T>& query, U* s_query) { #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? 
query.ptr(::min(queryIdx, query.rows - 1))[loadX] : 0; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __device__ void loopUnrolledCached(int queryIdx, const PtrStepSz<T>& query, int imgIdx, const PtrStepSz<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < train.cols) { T val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx)) { if (distVal < bestDistance1) { bestImgIdx2 = bestImgIdx1; bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx1 = imgIdx; bestDistance1 = distVal; bestTrainIdx1 = trainIdx; } else if (distVal < bestDistance2) { bestImgIdx2 = imgIdx; bestDistance2 = distVal; bestTrainIdx2 = trainIdx; } } } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN); loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? 
MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN); loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const PtrStepSz<T> train = trains[imgIdx]; m.next(); loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<int2>& imgIdx, const PtrStepSz<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? 
MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Match Unrolled template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __device__ void loopUnrolled(int queryIdx, const PtrStepSz<T>& query, int imgIdx, const PtrStepSz<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < query.cols) { T val; ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val); s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx)) { if (distVal < bestDistance1) { bestImgIdx2 = bestImgIdx1; bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx1 = imgIdx; bestDistance1 = distVal; bestTrainIdx1 = trainIdx; } else if (distVal < bestDistance2) { bestImgIdx2 = imgIdx; bestDistance2 = distVal; bestTrainIdx2 = trainIdx; } } } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolled(const PtrStepSz<T>& query, const 
PtrStepSz<T>& train, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const PtrStepSz<T> train = trains[imgIdx]; m.next(); loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<int2>& imgIdx, const PtrStepSz<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Match template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __device__ void loop(int queryIdx, const PtrStepSz<T>& query, int imgIdx, const PtrStepSz<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; 
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < query.cols) { T val; ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val); s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx)) { if (distVal < bestDistance1) { bestImgIdx2 = bestImgIdx1; bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx1 = imgIdx; bestDistance1 = distVal; bestTrainIdx1 = trainIdx; } else if (distVal < bestDistance2) { bestImgIdx2 = imgIdx; bestDistance2 = distVal; bestTrainIdx2 = trainIdx; } } } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void match(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void match(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void match(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; 
int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const PtrStepSz<T> train = trains[imgIdx]; m.next(); loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void match(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<int2>& imgIdx, const PtrStepSz<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // knnMatch 2 dispatcher template <typename Dist, typename T, typename Mask> void match2Dispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, cudaStream_t stream) { if (query.cols <= 64) { matchUnrolledCached<16, 64, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 128) { matchUnrolledCached<16, 128, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); } /*else if (query.cols <= 256) { matchUnrolled<16, 256, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 512) { matchUnrolled<16, 512, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 1024) { matchUnrolled<16, 1024, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); }*/ else { match<16, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); } } template <typename Dist, typename T, typename Mask> void match2Dispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream) { if (query.cols <= 64) { matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 128) { matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, 
static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); } /*else if (query.cols <= 256) { matchUnrolled<16, 256, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 512) { matchUnrolled<16, 512, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 1024) { matchUnrolled<16, 1024, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); }*/ else { match<16, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); } } /////////////////////////////////////////////////////////////////////////////// // Calc distance kernel template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void calcDistanceUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, PtrStepf allDist) { extern __shared__ int smem[]; const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y; const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; if (loadX < query.cols) { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(::min(queryIdx, query.rows - 1))[loadX]; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX]; } else { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } if (queryIdx < query.rows && trainIdx < train.rows) { float distVal = numeric_limits<float>::max(); if (mask(queryIdx, trainIdx)) distVal = (typename Dist::result_type)dist; allDist.ptr(queryIdx)[trainIdx] = distVal; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void calcDistanceUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzf& allDist, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); calcDistanceUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, allDist); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void calcDistance(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, PtrStepf allDist) { extern __shared__ int smem[]; const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y; const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x; typename Dist::value_type* s_query = 
(typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); Dist dist; for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; if (loadX < query.cols) { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(::min(queryIdx, query.rows - 1))[loadX]; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX]; } else { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } if (queryIdx < query.rows && trainIdx < train.rows) { float distVal = numeric_limits<float>::max(); if (mask(queryIdx, trainIdx)) distVal = (typename Dist::result_type)dist; allDist.ptr(queryIdx)[trainIdx] = distVal; } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void calcDistance(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzf& allDist, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); calcDistance<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, allDist); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Calc Distance dispatcher template <typename Dist, typename T, typename Mask> void calcDistanceDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzf& allDist, cudaStream_t stream) { if (query.cols <= 64) { calcDistanceUnrolled<16, 64, Dist>(query, train, mask, allDist, stream); } else if (query.cols <= 128) { calcDistanceUnrolled<16, 128, Dist>(query, train, mask, allDist, stream); } /*else if (query.cols <= 256) { calcDistanceUnrolled<16, 256, Dist>(query, train, mask, allDist, stream); } else if (query.cols <= 512) { calcDistanceUnrolled<16, 512, Dist>(query, train, mask, allDist, stream); } else if (query.cols <= 1024) { calcDistanceUnrolled<16, 1024, Dist>(query, train, mask, allDist, stream); }*/ else { calcDistance<16, Dist>(query, train, mask, allDist, stream); } } /////////////////////////////////////////////////////////////////////////////// // find knn match kernel template <int BLOCK_SIZE> __global__ void findBestMatch(PtrStepSzf allDist, int i, PtrStepi trainIdx, PtrStepf distance) { const int SMEM_SIZE = BLOCK_SIZE > 64 ? 
BLOCK_SIZE : 64; __shared__ float s_dist[SMEM_SIZE]; __shared__ int s_trainIdx[SMEM_SIZE]; const int queryIdx = blockIdx.x; float* allDistRow = allDist.ptr(queryIdx); float dist = numeric_limits<float>::max(); int bestIdx = -1; for (int i = threadIdx.x; i < allDist.cols; i += BLOCK_SIZE) { float reg = allDistRow[i]; if (reg < dist) { dist = reg; bestIdx = i; } } s_dist[threadIdx.x] = dist; s_trainIdx[threadIdx.x] = bestIdx; __syncthreads(); reduceKeyVal<BLOCK_SIZE>(s_dist, dist, s_trainIdx, bestIdx, threadIdx.x, less<float>()); if (threadIdx.x == 0) { if (dist < numeric_limits<float>::max()) { allDistRow[bestIdx] = numeric_limits<float>::max(); trainIdx.ptr(queryIdx)[i] = bestIdx; distance.ptr(queryIdx)[i] = dist; } } } template <int BLOCK_SIZE> void findKnnMatch(int k, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, const PtrStepSzf& allDist, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, 1, 1); const dim3 grid(trainIdx.rows, 1, 1); for (int i = 0; i < k; ++i) { findBestMatch<BLOCK_SIZE><<<grid, block, 0, stream>>>(allDist, i, trainIdx, distance); cudaSafeCall( cudaGetLastError() ); } if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } void findKnnMatchDispatcher(int k, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream) { findKnnMatch<256>(k, static_cast<PtrStepSzi>(trainIdx), static_cast<PtrStepSzf>(distance), allDist, stream); } /////////////////////////////////////////////////////////////////////////////// // knn match Dispatcher template <typename Dist, typename T, typename Mask> void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, int k, const Mask& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream) { if (k == 2) { match2Dispatcher<Dist>(query, train, mask, trainIdx, distance, stream); } else { calcDistanceDispatcher<Dist>(query, train, mask, allDist, stream); findKnnMatchDispatcher(k, trainIdx, distance, allDist, stream); } } /////////////////////////////////////////////////////////////////////////////// // knn match caller template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream) { if (mask.data) matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, stream); else matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, stream); } template void matchL1_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); //template void matchL1_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template void matchL1_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template void matchL1_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& 
distance, const PtrStepSzf& allDist, cudaStream_t stream); template void matchL1_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template void matchL1_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream) { if (mask.data) matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, stream); else matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, stream); } //template void matchL2_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); //template void matchL2_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); //template void matchL2_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); //template void matchL2_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); //template void matchL2_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template void matchL2_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream) { if (mask.data) matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, stream); else matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, stream); } template void matchHamming_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); //template void matchHamming_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template void matchHamming_gpu<ushort>(const 
PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); //template void matchHamming_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template void matchHamming_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template <typename T> void match2L1_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream) { if (masks.data) match2Dispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, stream); else match2Dispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, stream); } template void match2L1_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); //template void match2L1_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template void match2L1_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template void match2L1_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template void match2L1_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template void match2L1_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template <typename T> void match2L2_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream) { if (masks.data) match2Dispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, stream); else match2Dispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, stream); } //template void match2L2_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); //template void match2L2_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const 
PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); //template void match2L2_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); //template void match2L2_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); //template void match2L2_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template void match2L2_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template <typename T> void match2Hamming_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream) { if (masks.data) match2Dispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, stream); else match2Dispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, stream); } template void match2Hamming_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); //template void match2Hamming_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template void match2Hamming_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); //template void match2Hamming_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template void match2Hamming_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); } // namespace bf_knnmatch }}} // namespace cv { namespace cuda { namespace cudev { #endif /* CUDA_DISABLER */
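// --- Illustrative sketch (not from the OpenCV sources above; the names Best2 and
// --- updateBest2 are hypothetical): the best/second-best bookkeeping that
// --- loopUnrolledCached, loopUnrolled and loop all perform for k=2 matching,
// --- pulled out into a tiny host-side program so the branch structure is easy to follow.
#include <cfloat>
#include <cstdio>

struct Best2 {
    float d1 = FLT_MAX, d2 = FLT_MAX;   // best and second-best distance seen so far
    int   i1 = -1,      i2 = -1;        // their train indices
};

// A new candidate either displaces the best match (which drops to second place)
// or, failing that, replaces only the second-best match.
inline void updateBest2(Best2& b, float dist, int trainIdx) {
    if (dist < b.d1)      { b.d2 = b.d1; b.i2 = b.i1; b.d1 = dist; b.i1 = trainIdx; }
    else if (dist < b.d2) { b.d2 = dist; b.i2 = trainIdx; }
}

int main() {
    Best2 b;
    const float dists[] = { 5.f, 2.f, 7.f, 1.f };
    for (int t = 0; t < 4; ++t) updateBest2(b, dists[t], t);
    printf("best=(%d, %.0f) second=(%d, %.0f)\n", b.i1, b.d1, b.i2, b.d2);  // best=(3, 1) second=(1, 2)
    return 0;
}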
d4f372ed76d4c001a1a4eacce60bb79cb65d0046.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" //implementation of fractional max pooling by Ben Graham, using a combination of code from caffe and torch template <typename Dtype> __global__ void FracMaxPoolForward(const int nthreads, const Dtype* bottom_data, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype *stride_h_start, const Dtype * stride_h_end, const Dtype *stride_w_start, const Dtype *stride_w_end, Dtype* top_data, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = max((int)stride_h_start[ph], 0); int wstart = max((int)stride_w_start[pw], 0); int hend = min((int)stride_h_end[ph], height); int wend = min((int)stride_w_end[pw], width); int offset = (n * channels + c) * height * width; const Dtype * bottom = bottom_data + offset; //Dtype maxval = -FLT_MAX; //int maxidx = 0; int maxidx = hstart * width + wstart; Dtype maxval = bottom[maxidx]; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom[maxidx]; } } } top_data[index] = maxval; top_mask[index] = maxidx + offset; } } template <typename Dtype> __global__ void FracMaxPoolBackward(const int nthreads, Dtype* top_diff, Dtype* top_mask, Dtype * bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int bottom_index = top_mask[index]; bottom_diff[bottom_index] += top_diff[index]; } } template <typename Dtype> __global__ void FracMaxPoolBackwardAtomic(const int nthreads, Dtype* top_diff, Dtype* top_mask, Dtype * bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int bottom_index = top_mask[index]; atomicAdd(bottom_diff+bottom_index,top_diff[index]); } } static int cunn_FracSpatialMaxPooling_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); int nOutputRows = luaL_checkint(L, 3); int nOutputCols = luaL_checkint(L, 4); THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); THCudaTensor *indices = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "indices", "torch.CudaTensor"); THCudaTensor *row_ind_start = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "row_ind_start", "torch.CudaTensor"); THCudaTensor *row_ind_end = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "row_ind_end", "torch.CudaTensor"); THCudaTensor *col_ind_start = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "col_ind_start", "torch.CudaTensor"); THCudaTensor *col_ind_end = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "col_ind_end", "torch.CudaTensor"); bool olap = luaT_getfieldcheckboolean(L, 1, "olap"); float *indices_data; float *output_data; float *input_data; float *col_ind_start_data; float *col_ind_end_data; float *row_ind_start_data; float *row_ind_end_data; luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); int nBatch = 1; int nInputPlane; int nInputRows; int nInputCols; if (input->nDimension == 3) { nInputPlane = input->size[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; //THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols); } else { nInputPlane = input->size[1]; nInputRows = input->size[2]; nInputCols = 
input->size[3]; nBatch = input->size[0]; //THCudaTensor_resize4d(state, output, input->size[0], nInputPlane, nOutputRows, nOutputCols); } int count = nBatch * nInputPlane * nOutputRows * nOutputCols; //THCudaTensor_resize1d(state, indices, count); //output = THCudaTensor_newContiguous(state, output); input = THCudaTensor_newContiguous(state, input); indices_data = THCudaTensor_data(state, indices); output_data = THCudaTensor_data(state, output); input_data = THCudaTensor_data(state, input); col_ind_start_data = THCudaTensor_data(state, col_ind_start); row_ind_start_data = THCudaTensor_data(state, row_ind_start); col_ind_end_data = THCudaTensor_data(state, col_ind_end); row_ind_end_data = THCudaTensor_data(state, row_ind_end); hipLaunchKernelGGL(( FracMaxPoolForward<float>), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, input_data, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, row_ind_start_data, row_ind_end_data, col_ind_start_data, col_ind_end_data, output_data, indices_data); THCudaTensor_free(state, input); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in FracMaxPoolingForward.updateOutput: %s\n", hipGetErrorString(err)); THError("aborting"); } return 1; } static int cunn_FracSpatialMaxPooling_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); THCudaTensor *indices = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "indices", "torch.CudaTensor"); luaL_argcheck(L, gradOutput->nDimension == 3 || gradOutput->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); bool olap = luaT_getfieldcheckboolean(L, 1, "olap"); float *indices_data; float *gradInput_data; float *gradOutput_data; gradOutput = THCudaTensor_newContiguous(state, gradOutput); int count = THCudaTensor_nElement(state, gradOutput); //THCudaTensor_resizeAs(state, gradInput, input); //gradInput = THCudaTensor_newContiguous(state, gradInput); THCudaTensor_zero(state, gradInput); indices_data = THCudaTensor_data(state, indices); gradOutput_data = THCudaTensor_data(state, gradOutput); gradInput_data = THCudaTensor_data(state, gradInput); if (olap) hipLaunchKernelGGL(( FracMaxPoolBackwardAtomic<float>), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, gradOutput_data, indices_data, gradInput_data); else hipLaunchKernelGGL(( FracMaxPoolBackward<float>), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, gradOutput_data, indices_data, gradInput_data); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in FracSpatialMaxsampling.updateGradInput: %s\n", hipGetErrorString(err)); THError("aborting"); } // clean THCudaTensor_free(state, gradOutput); return 1; } static const struct luaL_Reg cunn_FracSpatialMaxPooling__ [] = { {"FracSpatialMaxPooling_updateOutput", cunn_FracSpatialMaxPooling_updateOutput}, {"FracSpatialMaxPooling_updateGradInput", cunn_FracSpatialMaxPooling_updateGradInput}, {NULL, NULL} }; static void cunnx_FracSpatialMaxPooling_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, cunn_FracSpatialMaxPooling__, "nn"); lua_pop(L,1); }
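// --- Illustrative sketch (not part of the module above; the kernel name scatterAddGrad
// --- is hypothetical): why the 'olap' flag selects FracMaxPoolBackwardAtomic. With
// --- overlapping pooling regions, two output cells can record the same argmax position
// --- in top_mask, so a plain "+=" from different threads races on bottom_diff;
// --- atomicAdd serialises the conflicting updates.
__global__ void scatterAddGrad(const float* top_diff, const float* top_mask,
                               int n_top, float* bottom_diff) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n_top) {
        int bottom_index = (int)top_mask[i];                // duplicates possible when regions overlap
        atomicAdd(bottom_diff + bottom_index, top_diff[i]); // safe even when indices collide
    }
}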
d4f372ed76d4c001a1a4eacce60bb79cb65d0046.cu
#include "utils.h" //implementation of fractional max pooling by Ben Graham, using a combination of code from caffe and torch template <typename Dtype> __global__ void FracMaxPoolForward(const int nthreads, const Dtype* bottom_data, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype *stride_h_start, const Dtype * stride_h_end, const Dtype *stride_w_start, const Dtype *stride_w_end, Dtype* top_data, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = max((int)stride_h_start[ph], 0); int wstart = max((int)stride_w_start[pw], 0); int hend = min((int)stride_h_end[ph], height); int wend = min((int)stride_w_end[pw], width); int offset = (n * channels + c) * height * width; const Dtype * bottom = bottom_data + offset; //Dtype maxval = -FLT_MAX; //int maxidx = 0; int maxidx = hstart * width + wstart; Dtype maxval = bottom[maxidx]; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom[maxidx]; } } } top_data[index] = maxval; top_mask[index] = maxidx + offset; } } template <typename Dtype> __global__ void FracMaxPoolBackward(const int nthreads, Dtype* top_diff, Dtype* top_mask, Dtype * bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int bottom_index = top_mask[index]; bottom_diff[bottom_index] += top_diff[index]; } } template <typename Dtype> __global__ void FracMaxPoolBackwardAtomic(const int nthreads, Dtype* top_diff, Dtype* top_mask, Dtype * bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int bottom_index = top_mask[index]; atomicAdd(bottom_diff+bottom_index,top_diff[index]); } } static int cunn_FracSpatialMaxPooling_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); int nOutputRows = luaL_checkint(L, 3); int nOutputCols = luaL_checkint(L, 4); THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); THCudaTensor *indices = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "indices", "torch.CudaTensor"); THCudaTensor *row_ind_start = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "row_ind_start", "torch.CudaTensor"); THCudaTensor *row_ind_end = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "row_ind_end", "torch.CudaTensor"); THCudaTensor *col_ind_start = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "col_ind_start", "torch.CudaTensor"); THCudaTensor *col_ind_end = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "col_ind_end", "torch.CudaTensor"); bool olap = luaT_getfieldcheckboolean(L, 1, "olap"); float *indices_data; float *output_data; float *input_data; float *col_ind_start_data; float *col_ind_end_data; float *row_ind_start_data; float *row_ind_end_data; luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); int nBatch = 1; int nInputPlane; int nInputRows; int nInputCols; if (input->nDimension == 3) { nInputPlane = input->size[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; //THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols); } else { nInputPlane = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; nBatch = input->size[0]; //THCudaTensor_resize4d(state, output, 
input->size[0], nInputPlane, nOutputRows, nOutputCols); } int count = nBatch * nInputPlane * nOutputRows * nOutputCols; //THCudaTensor_resize1d(state, indices, count); //output = THCudaTensor_newContiguous(state, output); input = THCudaTensor_newContiguous(state, input); indices_data = THCudaTensor_data(state, indices); output_data = THCudaTensor_data(state, output); input_data = THCudaTensor_data(state, input); col_ind_start_data = THCudaTensor_data(state, col_ind_start); row_ind_start_data = THCudaTensor_data(state, row_ind_start); col_ind_end_data = THCudaTensor_data(state, col_ind_end); row_ind_end_data = THCudaTensor_data(state, row_ind_end); FracMaxPoolForward<float><<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, input_data, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, row_ind_start_data, row_ind_end_data, col_ind_start_data, col_ind_end_data, output_data, indices_data); THCudaTensor_free(state, input); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in FracMaxPoolingForward.updateOutput: %s\n", cudaGetErrorString(err)); THError("aborting"); } return 1; } static int cunn_FracSpatialMaxPooling_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); THCudaTensor *indices = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "indices", "torch.CudaTensor"); luaL_argcheck(L, gradOutput->nDimension == 3 || gradOutput->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); bool olap = luaT_getfieldcheckboolean(L, 1, "olap"); float *indices_data; float *gradInput_data; float *gradOutput_data; gradOutput = THCudaTensor_newContiguous(state, gradOutput); int count = THCudaTensor_nElement(state, gradOutput); //THCudaTensor_resizeAs(state, gradInput, input); //gradInput = THCudaTensor_newContiguous(state, gradInput); THCudaTensor_zero(state, gradInput); indices_data = THCudaTensor_data(state, indices); gradOutput_data = THCudaTensor_data(state, gradOutput); gradInput_data = THCudaTensor_data(state, gradInput); if (olap) FracMaxPoolBackwardAtomic<float><<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, gradOutput_data, indices_data, gradInput_data); else FracMaxPoolBackward<float><<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, gradOutput_data, indices_data, gradInput_data); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in FracSpatialMaxsampling.updateGradInput: %s\n", cudaGetErrorString(err)); THError("aborting"); } // clean THCudaTensor_free(state, gradOutput); return 1; } static const struct luaL_Reg cunn_FracSpatialMaxPooling__ [] = { {"FracSpatialMaxPooling_updateOutput", cunn_FracSpatialMaxPooling_updateOutput}, {"FracSpatialMaxPooling_updateGradInput", cunn_FracSpatialMaxPooling_updateGradInput}, {NULL, NULL} }; static void cunnx_FracSpatialMaxPooling_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, cunn_FracSpatialMaxPooling__, "nn"); lua_pop(L,1); }
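// --- Note on the pair above: the .hip file is the mechanical hipify translation of
// --- this .cu file. Runtime types and calls are renamed one-for-one (cudaError_t ->
// --- hipError_t, cudaGetLastError -> hipGetLastError, cudaGetErrorString ->
// --- hipGetErrorString), and triple-chevron launches become hipLaunchKernelGGL, e.g.
// --- the forward-pass launch:
// ---
// ---   CUDA: FracMaxPoolForward<float><<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, input_data, ...);
// ---   HIP:  hipLaunchKernelGGL((FracMaxPoolForward<float>), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, input_data, ...);
// ---
// --- where the two extra arguments (0, 0) are the dynamic shared-memory size and the stream.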
fb7a3712934166356a6b1aacec51f0bcc9018705.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <bits/stdc++.h> using namespace std; #define TILE_WIDTH 32 void sgd_CPU(float *A,float *B,float *C,int epochs,float lamda,float alpha,int m,int n,int k){ for(int i=0;i<epochs;++i){ for(int x = 0;x<m;++x){ for(int y = 0;y<n;++y){ float error = 0; float temp = 0; for(int iter = 0;iter < k;++iter){ temp += B[x*k + iter]*C[iter*n + y]; } error = A[x*n+y] - temp; for(int iter = 0;iter < k;++iter){ B[x*k + iter] = B[x*k + iter] + alpha*((error * C[iter*n + y]) - lamda*(B[x*k + iter])); C[iter*n + y] = C[iter*n + y] + alpha*((error * B[x*k + iter]) - lamda*(C[iter*n + y])); } } } } } // Compute C = A * B __global__ void matrixMultiply(float *M,float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y, Row = by * TILE_WIDTH + ty, Col = bx * TILE_WIDTH + tx; float Pvalue = 0; for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) { if (Row < numARows && m*TILE_WIDTH+tx < numAColumns) ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx]; else ds_M[ty][tx] = 0; if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows) ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col]; else ds_N[ty][tx] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) Pvalue += ds_M[ty][k] * ds_N[k][tx]; __syncthreads(); } if (Row < numCRows && Col < numCColumns) C[Row*numCColumns+Col] = M[Row*numCColumns+Col] - Pvalue; } __global__ void sgd_kernel(float *B,float *C,float *Error,float lamda,float alpha,int m,int n,int k){ int y = blockIdx.x * blockDim.x + threadIdx.x; int x = blockIdx.y * blockDim.y + threadIdx.y; if(y<k && x<m){ float error = 0; float gradient = 0; for(int j=0;j<n;++j){ error = Error[x*n + j]; gradient += error*C[y*n + j]; } __syncthreads(); B[x*k + y] += alpha*(gradient - lamda*(B[x*k + y])); } y = blockIdx.x * blockDim.x + threadIdx.x; x = blockIdx.y * blockDim.y + threadIdx.y; if(y<n && x<k){ float error = 0; float gradient = 0; for(int i=0;i<m;++i){ error = Error[i*n + y]; gradient += error*B[i*k + x]; } __syncthreads(); C[x*n + y] += alpha*(gradient - lamda*(C[x*n + y])); } } int main(){ int m,n; ifstream dataset("dataset.txt"); float *Ahost,*Bhost,*Chost,*Bhost1,*Chost1; float *A,*B,*C,*Error; //cout << "Enter dimensions of the matrix : "; //cin>>m>>n; m = 943; n = 1682; cout << "Enter k value : "; int k; cin>>k; int epochs; cout<<"Enter epochs for training : "; cin>>epochs; float alpha; cout<<"Enter alpha for training : "; cin>>alpha; float lamda; cout<<"Enter lamda (regularisation variable) for training : "; cin>>lamda; Ahost = new float[m*n]; Bhost = new float[m*k]; Bhost1 = new float[m*k]; Chost = new float[k*n]; Chost1 = new float[k*n]; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); srand(time(NULL)); memset(Ahost,0,sizeof(Ahost)); for(int i=0;i<100000;++i){ int temp; dataset >> temp; int x,y; x = --temp; dataset >> temp; y = --temp; dataset >> temp; Ahost[x*n + y] = temp; } /* //randomising the input array A for(int i=0;i<m;++i){ for(int j=0;j<n;++j){ Ahost[i*n + j] = rand()%5 + 1.03; } } */ // randomising the output array B for(int i=0;i<m;++i){ for(int j=0;j<k;++j){ Bhost[i*k + j] = rand()%2; Bhost1[i*k + j] = Bhost[i*k + j]; } } //randomising the output array C for(int i=0;i<k;++i){ for(int j=0;j<n;++j){ Chost[i*n + j] = rand()%2; Chost1[i*n + 
j] = Chost[i*n + j]; } } // cout<<" A : "<<endl; // for(int i=0;i<m;++i){ // for(int j=0;j<n;++j){ // cout<<Ahost[i*n+j]<<" "; // } // cout<<endl; // } // cout<<" B : \n"; // // for(int i=0;i<m;++i){ // for(int j=0;j<k;++j){ // cout<<Bhost[i*k+j]<< " "; // } // cout<<endl; // } hipMalloc((void **)&Error, sizeof(float) * m*n); hipMalloc((void **)&A, sizeof(float) * m*n); hipMalloc((void **)&B, sizeof(float) * m*k); hipMalloc((void **)&C, sizeof(float) * k*n); hipMemcpy(A, Ahost, sizeof(float) * m*n, hipMemcpyHostToDevice); hipMemcpy(B, Bhost, sizeof(float) * m*k, hipMemcpyHostToDevice); hipMemcpy(C, Chost, sizeof(float) * k*n, hipMemcpyHostToDevice); dim3 gridSize((n-1)/32 + 1,(m-1)/32 + 1,1); dim3 blockSize(32,32,1); hipEventRecord(start); for(int i=0;i<epochs;++i){ hipLaunchKernelGGL(( matrixMultiply), dim3(gridSize),dim3(blockSize), 0, 0, A,B,C,Error,m,k,k,n,m,n); hipLaunchKernelGGL(( sgd_kernel), dim3(gridSize),dim3(blockSize), 0, 0, B,C,Error,lamda,alpha,m,n,k); } hipEventRecord(stop); hipMemcpy(Bhost, B, sizeof(float) * m*k, hipMemcpyDeviceToHost); hipMemcpy(Chost, C, sizeof(float) * k*n, hipMemcpyDeviceToHost); // cout<<" Product : \n"; hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); cout<<"GPU kernel took "<<milliseconds/1000<<" seconds"<<endl; float temp[m][n]; for(int i=0;i<m;++i){ for(int j=0;j<n;++j){ temp[i][j] = 0; for(int l=0;l<k;++l){ temp[i][j]+= Bhost[i*k+l]*Chost[l*n+j]; } // cout<<temp[i][j]<< " "; } // cout<<endl; } float sumError = 0; for(int i=0;i<m;++i){ for(int j=0;j<n;++j){ float errorMat = Ahost[i*n+j] - temp[i][j]; sumError += errorMat*errorMat; } } cout<<"RMS error : "<<sqrt(sumError/(m*n))<<endl; double s = clock(); sgd_CPU(Ahost,Bhost1,Chost1,epochs,lamda,alpha,m,n,k); double e = clock(); cout<<"CPU implementation took "<< (e-s)/CLOCKS_PER_SEC<<" seconds\n"; for(int i=0;i<m;++i){ for(int j=0;j<n;++j){ temp[i][j] = 0; for(int l=0;l<k;++l){ temp[i][j]+= Bhost1[i*k+l]*Chost1[l*n+j]; } // cout<<temp[i][j]<< " "; } // cout<<endl; } sumError = 0; for(int i=0;i<m;++i){ for(int j=0;j<n;++j){ float errorMat = Ahost[i*n+j] - temp[i][j]; sumError += errorMat*errorMat; } } cout<<"RMS error : "<<sqrt(sumError/(m*n))<<endl; return 0; }
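// --- Summary of what one epoch of the GPU path above computes (full-matrix form, in
// --- contrast to the per-element loop in sgd_CPU): matrixMultiply writes
// --- Error = A - B*C, and sgd_kernel then applies, with learning rate alpha and
// --- regularisation lamda,
// ---     B[x][y] += alpha * ((Error * C^T)[x][y] - lamda * B[x][y])   for x < m, y < k
// ---     C[x][y] += alpha * ((B^T * Error)[x][y] - lamda * C[x][y])   for x < k, y < n
// --- up to the unsynchronised ordering of the B and C updates inside the same launch.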
fb7a3712934166356a6b1aacec51f0bcc9018705.cu
#include <bits/stdc++.h> using namespace std; #define TILE_WIDTH 32 void sgd_CPU(float *A,float *B,float *C,int epochs,float lamda,float alpha,int m,int n,int k){ for(int i=0;i<epochs;++i){ for(int x = 0;x<m;++x){ for(int y = 0;y<n;++y){ float error = 0; float temp = 0; for(int iter = 0;iter < k;++iter){ temp += B[x*k + iter]*C[iter*n + y]; } error = A[x*n+y] - temp; for(int iter = 0;iter < k;++iter){ B[x*k + iter] = B[x*k + iter] + alpha*((error * C[iter*n + y]) - lamda*(B[x*k + iter])); C[iter*n + y] = C[iter*n + y] + alpha*((error * B[x*k + iter]) - lamda*(C[iter*n + y])); } } } } } // Compute C = A * B __global__ void matrixMultiply(float *M,float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y, Row = by * TILE_WIDTH + ty, Col = bx * TILE_WIDTH + tx; float Pvalue = 0; for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) { if (Row < numARows && m*TILE_WIDTH+tx < numAColumns) ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx]; else ds_M[ty][tx] = 0; if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows) ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col]; else ds_N[ty][tx] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) Pvalue += ds_M[ty][k] * ds_N[k][tx]; __syncthreads(); } if (Row < numCRows && Col < numCColumns) C[Row*numCColumns+Col] = M[Row*numCColumns+Col] - Pvalue; } __global__ void sgd_kernel(float *B,float *C,float *Error,float lamda,float alpha,int m,int n,int k){ int y = blockIdx.x * blockDim.x + threadIdx.x; int x = blockIdx.y * blockDim.y + threadIdx.y; if(y<k && x<m){ float error = 0; float gradient = 0; for(int j=0;j<n;++j){ error = Error[x*n + j]; gradient += error*C[y*n + j]; } __syncthreads(); B[x*k + y] += alpha*(gradient - lamda*(B[x*k + y])); } y = blockIdx.x * blockDim.x + threadIdx.x; x = blockIdx.y * blockDim.y + threadIdx.y; if(y<n && x<k){ float error = 0; float gradient = 0; for(int i=0;i<m;++i){ error = Error[i*n + y]; gradient += error*B[i*k + x]; } __syncthreads(); C[x*n + y] += alpha*(gradient - lamda*(C[x*n + y])); } } int main(){ int m,n; ifstream dataset("dataset.txt"); float *Ahost,*Bhost,*Chost,*Bhost1,*Chost1; float *A,*B,*C,*Error; //cout << "Enter dimensions of the matrix : "; //cin>>m>>n; m = 943; n = 1682; cout << "Enter k value : "; int k; cin>>k; int epochs; cout<<"Enter epochs for training : "; cin>>epochs; float alpha; cout<<"Enter alpha for training : "; cin>>alpha; float lamda; cout<<"Enter lamda (regularisation variable) for training : "; cin>>lamda; Ahost = new float[m*n]; Bhost = new float[m*k]; Bhost1 = new float[m*k]; Chost = new float[k*n]; Chost1 = new float[k*n]; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); srand(time(NULL)); memset(Ahost,0,sizeof(Ahost)); for(int i=0;i<100000;++i){ int temp; dataset >> temp; int x,y; x = --temp; dataset >> temp; y = --temp; dataset >> temp; Ahost[x*n + y] = temp; } /* //randomising the input array A for(int i=0;i<m;++i){ for(int j=0;j<n;++j){ Ahost[i*n + j] = rand()%5 + 1.03; } } */ // randomising the output array B for(int i=0;i<m;++i){ for(int j=0;j<k;++j){ Bhost[i*k + j] = rand()%2; Bhost1[i*k + j] = Bhost[i*k + j]; } } //randomising the output array C for(int i=0;i<k;++i){ for(int j=0;j<n;++j){ Chost[i*n + j] = rand()%2; Chost1[i*n + j] = Chost[i*n + j]; } } // cout<<" A : "<<endl; // for(int i=0;i<m;++i){ // for(int 
j=0;j<n;++j){ // cout<<Ahost[i*n+j]<<" "; // } // cout<<endl; // } // cout<<" B : \n"; // // for(int i=0;i<m;++i){ // for(int j=0;j<k;++j){ // cout<<Bhost[i*k+j]<< " "; // } // cout<<endl; // } cudaMalloc((void **)&Error, sizeof(float) * m*n); cudaMalloc((void **)&A, sizeof(float) * m*n); cudaMalloc((void **)&B, sizeof(float) * m*k); cudaMalloc((void **)&C, sizeof(float) * k*n); cudaMemcpy(A, Ahost, sizeof(float) * m*n, cudaMemcpyHostToDevice); cudaMemcpy(B, Bhost, sizeof(float) * m*k, cudaMemcpyHostToDevice); cudaMemcpy(C, Chost, sizeof(float) * k*n, cudaMemcpyHostToDevice); dim3 gridSize((n-1)/32 + 1,(m-1)/32 + 1,1); dim3 blockSize(32,32,1); cudaEventRecord(start); for(int i=0;i<epochs;++i){ matrixMultiply<<<gridSize,blockSize>>>(A,B,C,Error,m,k,k,n,m,n); sgd_kernel<<<gridSize,blockSize>>>(B,C,Error,lamda,alpha,m,n,k); } cudaEventRecord(stop); cudaMemcpy(Bhost, B, sizeof(float) * m*k, cudaMemcpyDeviceToHost); cudaMemcpy(Chost, C, sizeof(float) * k*n, cudaMemcpyDeviceToHost); // cout<<" Product : \n"; cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cout<<"GPU kernel took "<<milliseconds/1000<<" seconds"<<endl; float temp[m][n]; for(int i=0;i<m;++i){ for(int j=0;j<n;++j){ temp[i][j] = 0; for(int l=0;l<k;++l){ temp[i][j]+= Bhost[i*k+l]*Chost[l*n+j]; } // cout<<temp[i][j]<< " "; } // cout<<endl; } float sumError = 0; for(int i=0;i<m;++i){ for(int j=0;j<n;++j){ float errorMat = Ahost[i*n+j] - temp[i][j]; sumError += errorMat*errorMat; } } cout<<"RMS error : "<<sqrt(sumError/(m*n))<<endl; double s = clock(); sgd_CPU(Ahost,Bhost1,Chost1,epochs,lamda,alpha,m,n,k); double e = clock(); cout<<"CPU implementation took "<< (e-s)/CLOCKS_PER_SEC<<" seconds\n"; for(int i=0;i<m;++i){ for(int j=0;j<n;++j){ temp[i][j] = 0; for(int l=0;l<k;++l){ temp[i][j]+= Bhost1[i*k+l]*Chost1[l*n+j]; } // cout<<temp[i][j]<< " "; } // cout<<endl; } sumError = 0; for(int i=0;i<m;++i){ for(int j=0;j<n;++j){ float errorMat = Ahost[i*n+j] - temp[i][j]; sumError += errorMat*errorMat; } } cout<<"RMS error : "<<sqrt(sumError/(m*n))<<endl; return 0; }
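// --- Side note on the verification step in main() above (the helper below is
// --- hypothetical, not in the file): float temp[m][n] is a runtime-sized stack array,
// --- about 6.3 MB for the hard-coded 943 x 1682 problem and a non-standard VLA in C++,
// --- which can run close to (or past) common default stack limits. A heap-backed
// --- equivalent of the same B*C reconstruction used for the RMS check:
#include <cstddef>
#include <vector>

static std::vector<float> reconstruct(const float* B, const float* C, int m, int n, int k) {
    std::vector<float> temp((std::size_t)m * n, 0.0f);       // heap allocation instead of a VLA
    for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j) {
            float acc = 0.0f;
            for (int l = 0; l < k; ++l)
                acc += B[i * k + l] * C[l * n + j];           // same product as the check in main()
            temp[(std::size_t)i * n + j] = acc;
        }
    return temp;
}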
28e063f247ab3115eada74d02a50cb340d2e3101.hip
// !!! This is a file automatically generated by hipify!!! #include "collision_solve_device.cuh" // #include "project_typedefs.hpp" #include <cstdio> #include <hip/hip_runtime.h> #include <time.h> #include <iostream> #include <climits> #include <algorithm> // std::sort #include <math.h> #include <vector> #include <thrust/sort.h> #include <thrust/execution_policy.h> #define BLOCKS 512 #define THREADS_PER_BLOCK 512 #define PRINT_TIMES 0 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); exit(code); } } bool particles_colliding_g(Particle p1, Particle p2) { // #if DEBUG == 1 if(isnan(p1.x)) { cout << "Nan in particles_colliding()\n"; } if(isnan(p2.x)) { cout << "Nan in particles_colliding()\n"; } if(isnan(p1.y)) { cout << "Nan in particles_colliding()\n"; } if(isnan(p2.y)) { cout << "Nan in particles_colliding()\n"; } if(isnan(p1.radius)) { cout << "Nan in particles_colliding()\n"; } if(isnan(p2.radius)) { cout << "Nan in particles_colliding()\n"; } #endif return ((p1.x - p2.x) * (p1.x - p2.x)) + ((p1.y - p2.y) * (p1.y - p2.y)) < (p1.radius + p2.radius) * (p1.radius + p2.radius); } __global__ void detect_collisions_GPU_naive_kernel ( Particle * dev_particles, bool * dev_output_collisions, unsigned int num_particles ) { unsigned pidx1 = blockIdx.x * blockDim.x + threadIdx.x; Particle * p1; Particle * p2; bool col; while(pidx1 < num_particles) { for(unsigned int pidx2 = pidx1 + 1; pidx2 < num_particles; pidx2++) { p1 = dev_particles + pidx1; p2 = dev_particles + pidx2; col = ((p1->x - p2->x) * (p1->x - p2->x)) + ((p1->y - p2->y) * (p1->y - p2->y)) < (p1->radius + p2->radius) * (p1->radius + p2->radius); if(col == true) { dev_output_collisions[(pidx1 * num_particles) + pidx2] = true; dev_output_collisions[(pidx2 * num_particles) + pidx1] = true; } } pidx1 += blockDim.x * gridDim.x; } } /* Implentation of naive algorithm on GPU. 
Index (x, y) is indexed by arr[(x * num_particles) + y] */ void detect_collisions_GPU_naive(Particle * particles, bool * output_collisions, unsigned int num_particles, float * comp_time, Particle * dev_particles, bool * dev_output_collisions ) { const clock_t begin_time = clock(); #if PRINT_TIMES == 1 const clock_t copy_start_time = clock(); #endif // Set values for data on GPU gpuErrchk(hipMemset(dev_output_collisions, (int)false, sizeof(bool) * num_particles * num_particles)); #if PRINT_TIMES == 1 hipDeviceSynchronize(); float cpoy_runtime = float( clock () - copy_start_time ) / CLOCKS_PER_SEC; printf("First copy takes %fs\n", cpoy_runtime); #endif #if PRINT_TIMES == 1 const clock_t kernel_start_time = clock(); #endif // Call kernel hipLaunchKernelGGL(( detect_collisions_GPU_naive_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, dev_particles, dev_output_collisions, num_particles); #if PRINT_TIMES == 1 hipDeviceSynchronize(); float kernel_runtime = float( clock () - kernel_start_time ) / CLOCKS_PER_SEC; printf("Kernel takes %fs\n", kernel_runtime); #endif // check for an error in kernal call hipError_t err2 = hipGetLastError(); if(err2 != hipSuccess) { printf("%s\n", hipGetErrorString(err2)); } #if PRINT_TIMES == 1 const clock_t cpoy_back_start_time = clock(); #endif // Copy data back gpuErrchk(hipMemcpy(output_collisions, dev_output_collisions, sizeof(bool) * num_particles * num_particles, hipMemcpyDeviceToHost)); #if PRINT_TIMES == 1 hipDeviceSynchronize(); float copy_back_runtime = float( clock () - cpoy_back_start_time ) / CLOCKS_PER_SEC; printf("Copy back takes %fs\n", copy_back_runtime); #endif *comp_time = float( clock () - begin_time ) / CLOCKS_PER_SEC; } __global__ void detect_collisions_GPU_optimized1_kernal_s1( unsigned int num_particles, Particle * dev_particles, // len: num_particles bool * dev_output_collisions, // len: num_particles^2. 
Index (x, y) is indexed by arr[(x * num_particles) + y] ParticleBound * bounds_x, // len: num_particles * 2 ParticleBound * bounds_y, // len: num_particles * 2 int * dev_active_particles, // len: num_particles int * dev_active_particles_len, // a single int float * x_sum, float * y_sum) { extern __shared__ float locations_for_sum[]; // first 'THREADS_PER_BLOCK' are x, second 'THREADS_PER_BLOCK' are y // we do redection to find sum (and therfore mean) of x and y positions locations_for_sum[threadIdx.x] = 0.; // set x to 0 locations_for_sum[threadIdx.x + THREADS_PER_BLOCK] = 0.; // set y to 0 unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // Fill bounds_x and bound_y while(i < num_particles) { bounds_x[2 * i].position = dev_particles[i].x - dev_particles[i].radius; bounds_x[2 * i].is_begin = true; bounds_x[2 * i].particle_idx = i; bounds_x[2 * i + 1].position = dev_particles[i].x + dev_particles[i].radius; bounds_x[2 * i + 1].is_begin = false; bounds_x[2 * i + 1].particle_idx = i; locations_for_sum[threadIdx.x] = dev_particles[i].x; bounds_y[2 * i].position = dev_particles[i].y - dev_particles[i].radius; bounds_y[2 * i].is_begin = true; bounds_y[2 * i].particle_idx = i; bounds_y[2 * i + 1].position = dev_particles[i].y + dev_particles[i].radius; bounds_y[2 * i + 1].is_begin = false; bounds_y[2 * i + 1].particle_idx = i; locations_for_sum[threadIdx.x + THREADS_PER_BLOCK] = dev_particles[i].y; __syncthreads(); // Implement reduction to get sums for(uint s = blockDim.x / 2; s > 0; s>>=1) { if(threadIdx.x < s) { locations_for_sum[threadIdx.x] += locations_for_sum[threadIdx.x + s]; locations_for_sum[threadIdx.x + THREADS_PER_BLOCK] += locations_for_sum[threadIdx.x + THREADS_PER_BLOCK + s]; } __syncthreads(); } if(threadIdx.x == 0) { atomicAdd(x_sum, locations_for_sum[0]); atomicAdd(y_sum, locations_for_sum[THREADS_PER_BLOCK]); } __syncthreads(); i += blockDim.x * gridDim.x; } } __global__ void detect_collisions_GPU_optimized1_kernal_s2( unsigned int num_particles, Particle * dev_particles, // len: num_particles bool * dev_output_collisions, // len: num_particles^2. 
Index (x, y) is indexed by arr[(x * num_particles) + y] ParticleBound * bounds_x, // len: num_particles * 2 ParticleBound * bounds_y, // len: num_particles * 2 int * dev_active_particles, // len: num_particles int * dev_active_particles_len, // a single int float * x_mean, float * y_mean, float * x_var, float * y_var ) { extern __shared__ float vars_shared[]; // The first 'THREADS_PER_BLOCK' elements are for x, second are for y unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // Turn sum into mean if(i == 0) { *x_mean = *x_mean / (float) num_particles; *y_mean = *y_mean / (float) num_particles; } __syncthreads(); // Find variance while(i < num_particles * 2) { vars_shared[threadIdx.x] = (bounds_x[i].position - *x_mean) * (bounds_x[i].position - *x_mean); // x axis vars_shared[threadIdx.x + THREADS_PER_BLOCK] = (bounds_y[i].position - *y_mean) * (bounds_y[i].position - *y_mean); // y axis __syncthreads(); // Reduction for(uint s = blockDim.x / 2; s > 0; s>>=1) { if(threadIdx.x < s) { vars_shared[threadIdx.x] += vars_shared[threadIdx.x + s]; vars_shared[threadIdx.x + THREADS_PER_BLOCK] += vars_shared[threadIdx.x + THREADS_PER_BLOCK + s]; } __syncthreads(); } if(threadIdx.x == 0) { atomicAdd(x_var, vars_shared[0]); atomicAdd(y_var, vars_shared[THREADS_PER_BLOCK]); } __syncthreads(); i += blockDim.x * gridDim.x; } } void detect_collisions_GPU_optimized1(Particle * particles, bool * output_collisions, unsigned int num_particles, float * comp_time, Particle * dev_particles, bool * dev_output_collisions, ParticleBound * bounds_x, ParticleBound * bounds_y, int * dev_active_particles, // indexes (wrt 'particles') of the active particles int * dev_active_particles_len ) { // Not including malloc in the time measurement bc in practice, we would only malloc once. Everything we are // tracking time for is something that would be executed in a loop float * dev_x_mean; float * dev_y_mean; gpuErrchk(hipMalloc((void**)&dev_x_mean, sizeof(float))); gpuErrchk(hipMalloc((void**)&dev_y_mean, sizeof(float))); gpuErrchk(hipMemset(dev_x_mean, 0, sizeof(float))); gpuErrchk(hipMemset(dev_y_mean, 0, sizeof(float))); float * dev_x_var; float * dev_y_var; gpuErrchk(hipMalloc((void**)&dev_x_var, sizeof(float))); gpuErrchk(hipMalloc((void**)&dev_y_var, sizeof(float))); gpuErrchk(hipMemset(dev_x_var, 0, sizeof(float))); gpuErrchk(hipMemset(dev_y_var, 0, sizeof(float))); const clock_t begin_time = clock(); // Set initial values for data on GPU gpuErrchk(hipMemset(dev_output_collisions, (int)false, sizeof(bool) * num_particles * num_particles)); // Call kernel to set bounds arrays and calculate sums (of x and y positions) for means. 
hipLaunchKernelGGL(( detect_collisions_GPU_optimized1_kernal_s1), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), sizeof(float) * THREADS_PER_BLOCK * 2, 0, num_particles, dev_particles, dev_output_collisions, bounds_x, bounds_y, dev_active_particles, dev_active_particles_len, dev_x_mean, dev_y_mean); // check for an error in kernal call hipError_t err1 = hipGetLastError(); if(err1 != hipSuccess) { printf("%s\n", hipGetErrorString(err1)); } /// Call kernel to find variance in x and y positions hipLaunchKernelGGL(( detect_collisions_GPU_optimized1_kernal_s2), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), sizeof(float) * THREADS_PER_BLOCK * 2, 0, num_particles, dev_particles, dev_output_collisions, bounds_x, bounds_y, dev_active_particles, dev_active_particles_len, dev_x_mean, dev_y_mean, dev_x_var, dev_y_var); // check for an error in kernal call hipError_t err2 = hipGetLastError(); if(err2 != hipSuccess) { printf("%s\n", hipGetErrorString(err2)); } float x_var; float y_var; gpuErrchk(hipMemcpy(&x_var, dev_x_var, sizeof(float), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(&y_var, dev_y_var, sizeof(float), hipMemcpyDeviceToHost)); // Choose higher variance ParticleBound * chosen_bounds; // Device if(x_var > y_var) { chosen_bounds = bounds_x; } else { chosen_bounds = bounds_y; } // Sort the chosen_bounds array // For this, I used the Cuda Thrust library, which has a parallel sorting function. thrust::sort(thrust::device, chosen_bounds, chosen_bounds + num_particles * 2); // check for an error in kernal call hipError_t err3 = hipGetLastError(); if(err3 != hipSuccess) { printf("%s\n", hipGetErrorString(err3)); } ParticleBound * bounds_chosen = new ParticleBound[2*num_particles]; // Host hipMemcpy(bounds_chosen, chosen_bounds, sizeof(ParticleBound) * num_particles * 2, hipMemcpyDeviceToHost); // List of particle_idx's that are 'active', ie: we are still inside its bounds std::vector<unsigned int> active_particles; // Iterate through the sorted list of bounds // Note: did not make this into a kernel for reasons discussed in the readme for(unsigned long int i = 0; i < 2*num_particles; i++) { if(bounds_chosen[i].is_begin == true) { // Elements of active_particles are all potential collisions. // Check with each element of active_particles for(unsigned int active_idx = 0; active_idx < active_particles.size(); active_idx++) { if(particles_colliding_g(particles[bounds_chosen[i].particle_idx], particles[active_particles[active_idx]])) { output_collisions[(bounds_chosen[i].particle_idx * num_particles) + active_particles[active_idx]] = true; output_collisions[(active_particles[active_idx] * num_particles) + bounds_chosen[i].particle_idx] = true; } } active_particles.push_back(bounds_chosen[i].particle_idx); } else { // Remove bounds_chosen[i].particle_idx from active_particles active_particles.erase(remove(active_particles.begin(), active_particles.end(), bounds_chosen[i].particle_idx), active_particles.end()); } } // Copy data back to host // gpuErrchk(hipMemcpy(output_collisions, dev_output_collisions, sizeof(bool) * num_particles * num_particles, hipMemcpyDeviceToHost)); *comp_time = float( clock () - begin_time ) / CLOCKS_PER_SEC; hipFree(dev_x_mean); hipFree(dev_y_mean); hipFree(dev_x_var); hipFree(dev_y_var); } // void detect_collisions_GPU_optimized2(Particle * particles, // bool * output_collisions, // unsigned int num_particles, // float * comp_time) { // }
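// Editor's note (hedged): in detect_collisions_GPU_optimized1_kernal_s1 above, __syncthreads()
// sits inside a grid-stride while loop whose trip count can differ between threads of the same
// block (the boundary block), which the CUDA/HIP programming model does not allow. A common
// alternative is to accumulate per-thread partial sums in registers and reduce in shared memory
// once per block. The sketch below covers only the x/y position sums; the kernel and variable
// names are illustrative, while Particle and THREADS_PER_BLOCK come from the file above. It
// assumes blockDim.x == THREADS_PER_BLOCK (a power of two), as in the launches above.
__global__ void block_sum_positions(const Particle *particles, unsigned int num_particles,
                                    float *x_sum, float *y_sum) {
    __shared__ float sx[THREADS_PER_BLOCK];
    __shared__ float sy[THREADS_PER_BLOCK];
    float ax = 0.f, ay = 0.f;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_particles;
         i += blockDim.x * gridDim.x) {
        ax += particles[i].x;   // per-thread partial sums, no synchronization needed here
        ay += particles[i].y;
    }
    sx[threadIdx.x] = ax;
    sy[threadIdx.x] = ay;
    __syncthreads();            // every thread of the block reaches this exactly once
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s) {
            sx[threadIdx.x] += sx[threadIdx.x + s];
            sy[threadIdx.x] += sy[threadIdx.x + s];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {     // one atomic per block instead of one per thread
        atomicAdd(x_sum, sx[0]);
        atomicAdd(y_sum, sy[0]);
    }
}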
28e063f247ab3115eada74d02a50cb340d2e3101.cu
#include "collision_solve_device.cuh" // #include "project_typedefs.hpp" #include <cstdio> #include <cuda_runtime.h> #include <time.h> #include <iostream> #include <climits> #include <algorithm> // std::sort #include <math.h> #include <vector> #include <thrust/sort.h> #include <thrust/execution_policy.h> #define BLOCKS 512 #define THREADS_PER_BLOCK 512 #define PRINT_TIMES 0 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); exit(code); } } bool particles_colliding_g(Particle p1, Particle p2) { // #if DEBUG == 1 if(isnan(p1.x)) { cout << "Nan in particles_colliding()\n"; } if(isnan(p2.x)) { cout << "Nan in particles_colliding()\n"; } if(isnan(p1.y)) { cout << "Nan in particles_colliding()\n"; } if(isnan(p2.y)) { cout << "Nan in particles_colliding()\n"; } if(isnan(p1.radius)) { cout << "Nan in particles_colliding()\n"; } if(isnan(p2.radius)) { cout << "Nan in particles_colliding()\n"; } #endif return ((p1.x - p2.x) * (p1.x - p2.x)) + ((p1.y - p2.y) * (p1.y - p2.y)) < (p1.radius + p2.radius) * (p1.radius + p2.radius); } __global__ void detect_collisions_GPU_naive_kernel ( Particle * dev_particles, bool * dev_output_collisions, unsigned int num_particles ) { unsigned pidx1 = blockIdx.x * blockDim.x + threadIdx.x; Particle * p1; Particle * p2; bool col; while(pidx1 < num_particles) { for(unsigned int pidx2 = pidx1 + 1; pidx2 < num_particles; pidx2++) { p1 = dev_particles + pidx1; p2 = dev_particles + pidx2; col = ((p1->x - p2->x) * (p1->x - p2->x)) + ((p1->y - p2->y) * (p1->y - p2->y)) < (p1->radius + p2->radius) * (p1->radius + p2->radius); if(col == true) { dev_output_collisions[(pidx1 * num_particles) + pidx2] = true; dev_output_collisions[(pidx2 * num_particles) + pidx1] = true; } } pidx1 += blockDim.x * gridDim.x; } } /* Implentation of naive algorithm on GPU. 
Index (x, y) is indexed by arr[(x * num_particles) + y] */ void detect_collisions_GPU_naive(Particle * particles, bool * output_collisions, unsigned int num_particles, float * comp_time, Particle * dev_particles, bool * dev_output_collisions ) { const clock_t begin_time = clock(); #if PRINT_TIMES == 1 const clock_t copy_start_time = clock(); #endif // Set values for data on GPU gpuErrchk(cudaMemset(dev_output_collisions, (int)false, sizeof(bool) * num_particles * num_particles)); #if PRINT_TIMES == 1 cudaDeviceSynchronize(); float cpoy_runtime = float( clock () - copy_start_time ) / CLOCKS_PER_SEC; printf("First copy takes %fs\n", cpoy_runtime); #endif #if PRINT_TIMES == 1 const clock_t kernel_start_time = clock(); #endif // Call kernel detect_collisions_GPU_naive_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(dev_particles, dev_output_collisions, num_particles); #if PRINT_TIMES == 1 cudaDeviceSynchronize(); float kernel_runtime = float( clock () - kernel_start_time ) / CLOCKS_PER_SEC; printf("Kernel takes %fs\n", kernel_runtime); #endif // check for an error in kernal call cudaError err2 = cudaGetLastError(); if(err2 != cudaSuccess) { printf("%s\n", cudaGetErrorString(err2)); } #if PRINT_TIMES == 1 const clock_t cpoy_back_start_time = clock(); #endif // Copy data back gpuErrchk(cudaMemcpy(output_collisions, dev_output_collisions, sizeof(bool) * num_particles * num_particles, cudaMemcpyDeviceToHost)); #if PRINT_TIMES == 1 cudaDeviceSynchronize(); float copy_back_runtime = float( clock () - cpoy_back_start_time ) / CLOCKS_PER_SEC; printf("Copy back takes %fs\n", copy_back_runtime); #endif *comp_time = float( clock () - begin_time ) / CLOCKS_PER_SEC; } __global__ void detect_collisions_GPU_optimized1_kernal_s1( unsigned int num_particles, Particle * dev_particles, // len: num_particles bool * dev_output_collisions, // len: num_particles^2. 
Index (x, y) is indexed by arr[(x * num_particles) + y] ParticleBound * bounds_x, // len: num_particles * 2 ParticleBound * bounds_y, // len: num_particles * 2 int * dev_active_particles, // len: num_particles int * dev_active_particles_len, // a single int float * x_sum, float * y_sum) { extern __shared__ float locations_for_sum[]; // first 'THREADS_PER_BLOCK' are x, second 'THREADS_PER_BLOCK' are y // we do redection to find sum (and therfore mean) of x and y positions locations_for_sum[threadIdx.x] = 0.; // set x to 0 locations_for_sum[threadIdx.x + THREADS_PER_BLOCK] = 0.; // set y to 0 unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // Fill bounds_x and bound_y while(i < num_particles) { bounds_x[2 * i].position = dev_particles[i].x - dev_particles[i].radius; bounds_x[2 * i].is_begin = true; bounds_x[2 * i].particle_idx = i; bounds_x[2 * i + 1].position = dev_particles[i].x + dev_particles[i].radius; bounds_x[2 * i + 1].is_begin = false; bounds_x[2 * i + 1].particle_idx = i; locations_for_sum[threadIdx.x] = dev_particles[i].x; bounds_y[2 * i].position = dev_particles[i].y - dev_particles[i].radius; bounds_y[2 * i].is_begin = true; bounds_y[2 * i].particle_idx = i; bounds_y[2 * i + 1].position = dev_particles[i].y + dev_particles[i].radius; bounds_y[2 * i + 1].is_begin = false; bounds_y[2 * i + 1].particle_idx = i; locations_for_sum[threadIdx.x + THREADS_PER_BLOCK] = dev_particles[i].y; __syncthreads(); // Implement reduction to get sums for(uint s = blockDim.x / 2; s > 0; s>>=1) { if(threadIdx.x < s) { locations_for_sum[threadIdx.x] += locations_for_sum[threadIdx.x + s]; locations_for_sum[threadIdx.x + THREADS_PER_BLOCK] += locations_for_sum[threadIdx.x + THREADS_PER_BLOCK + s]; } __syncthreads(); } if(threadIdx.x == 0) { atomicAdd(x_sum, locations_for_sum[0]); atomicAdd(y_sum, locations_for_sum[THREADS_PER_BLOCK]); } __syncthreads(); i += blockDim.x * gridDim.x; } } __global__ void detect_collisions_GPU_optimized1_kernal_s2( unsigned int num_particles, Particle * dev_particles, // len: num_particles bool * dev_output_collisions, // len: num_particles^2. 
Index (x, y) is indexed by arr[(x * num_particles) + y] ParticleBound * bounds_x, // len: num_particles * 2 ParticleBound * bounds_y, // len: num_particles * 2 int * dev_active_particles, // len: num_particles int * dev_active_particles_len, // a single int float * x_mean, float * y_mean, float * x_var, float * y_var ) { extern __shared__ float vars_shared[]; // The first 'THREADS_PER_BLOCK' elements are for x, second are for y unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // Turn sum into mean if(i == 0) { *x_mean = *x_mean / (float) num_particles; *y_mean = *y_mean / (float) num_particles; } __syncthreads(); // Find variance while(i < num_particles * 2) { vars_shared[threadIdx.x] = (bounds_x[i].position - *x_mean) * (bounds_x[i].position - *x_mean); // x axis vars_shared[threadIdx.x + THREADS_PER_BLOCK] = (bounds_y[i].position - *y_mean) * (bounds_y[i].position - *y_mean); // y axis __syncthreads(); // Reduction for(uint s = blockDim.x / 2; s > 0; s>>=1) { if(threadIdx.x < s) { vars_shared[threadIdx.x] += vars_shared[threadIdx.x + s]; vars_shared[threadIdx.x + THREADS_PER_BLOCK] += vars_shared[threadIdx.x + THREADS_PER_BLOCK + s]; } __syncthreads(); } if(threadIdx.x == 0) { atomicAdd(x_var, vars_shared[0]); atomicAdd(y_var, vars_shared[THREADS_PER_BLOCK]); } __syncthreads(); i += blockDim.x * gridDim.x; } } void detect_collisions_GPU_optimized1(Particle * particles, bool * output_collisions, unsigned int num_particles, float * comp_time, Particle * dev_particles, bool * dev_output_collisions, ParticleBound * bounds_x, ParticleBound * bounds_y, int * dev_active_particles, // indexes (wrt 'particles') of the active particles int * dev_active_particles_len ) { // Not including malloc in the time measurement bc in practice, we would only malloc once. Everything we are // tracking time for is something that would be executed in a loop float * dev_x_mean; float * dev_y_mean; gpuErrchk(cudaMalloc((void**)&dev_x_mean, sizeof(float))); gpuErrchk(cudaMalloc((void**)&dev_y_mean, sizeof(float))); gpuErrchk(cudaMemset(dev_x_mean, 0, sizeof(float))); gpuErrchk(cudaMemset(dev_y_mean, 0, sizeof(float))); float * dev_x_var; float * dev_y_var; gpuErrchk(cudaMalloc((void**)&dev_x_var, sizeof(float))); gpuErrchk(cudaMalloc((void**)&dev_y_var, sizeof(float))); gpuErrchk(cudaMemset(dev_x_var, 0, sizeof(float))); gpuErrchk(cudaMemset(dev_y_var, 0, sizeof(float))); const clock_t begin_time = clock(); // Set initial values for data on GPU gpuErrchk(cudaMemset(dev_output_collisions, (int)false, sizeof(bool) * num_particles * num_particles)); // Call kernel to set bounds arrays and calculate sums (of x and y positions) for means. 
detect_collisions_GPU_optimized1_kernal_s1<<<BLOCKS, THREADS_PER_BLOCK, sizeof(float) * THREADS_PER_BLOCK * 2>>>(num_particles, dev_particles, dev_output_collisions, bounds_x, bounds_y, dev_active_particles, dev_active_particles_len, dev_x_mean, dev_y_mean); // check for an error in kernal call cudaError err1 = cudaGetLastError(); if(err1 != cudaSuccess) { printf("%s\n", cudaGetErrorString(err1)); } /// Call kernel to find variance in x and y positions detect_collisions_GPU_optimized1_kernal_s2<<<BLOCKS, THREADS_PER_BLOCK, sizeof(float) * THREADS_PER_BLOCK * 2>>>(num_particles, dev_particles, dev_output_collisions, bounds_x, bounds_y, dev_active_particles, dev_active_particles_len, dev_x_mean, dev_y_mean, dev_x_var, dev_y_var); // check for an error in kernal call cudaError err2 = cudaGetLastError(); if(err2 != cudaSuccess) { printf("%s\n", cudaGetErrorString(err2)); } float x_var; float y_var; gpuErrchk(cudaMemcpy(&x_var, dev_x_var, sizeof(float), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(&y_var, dev_y_var, sizeof(float), cudaMemcpyDeviceToHost)); // Choose higher variance ParticleBound * chosen_bounds; // Device if(x_var > y_var) { chosen_bounds = bounds_x; } else { chosen_bounds = bounds_y; } // Sort the chosen_bounds array // For this, I used the Cuda Thrust library, which has a parallel sorting function. thrust::sort(thrust::device, chosen_bounds, chosen_bounds + num_particles * 2); // check for an error in kernal call cudaError err3 = cudaGetLastError(); if(err3 != cudaSuccess) { printf("%s\n", cudaGetErrorString(err3)); } ParticleBound * bounds_chosen = new ParticleBound[2*num_particles]; // Host cudaMemcpy(bounds_chosen, chosen_bounds, sizeof(ParticleBound) * num_particles * 2, cudaMemcpyDeviceToHost); // List of particle_idx's that are 'active', ie: we are still inside its bounds std::vector<unsigned int> active_particles; // Iterate through the sorted list of bounds // Note: did not make this into a kernel for reasons discussed in the readme for(unsigned long int i = 0; i < 2*num_particles; i++) { if(bounds_chosen[i].is_begin == true) { // Elements of active_particles are all potential collisions. // Check with each element of active_particles for(unsigned int active_idx = 0; active_idx < active_particles.size(); active_idx++) { if(particles_colliding_g(particles[bounds_chosen[i].particle_idx], particles[active_particles[active_idx]])) { output_collisions[(bounds_chosen[i].particle_idx * num_particles) + active_particles[active_idx]] = true; output_collisions[(active_particles[active_idx] * num_particles) + bounds_chosen[i].particle_idx] = true; } } active_particles.push_back(bounds_chosen[i].particle_idx); } else { // Remove bounds_chosen[i].particle_idx from active_particles active_particles.erase(remove(active_particles.begin(), active_particles.end(), bounds_chosen[i].particle_idx), active_particles.end()); } } // Copy data back to host // gpuErrchk(cudaMemcpy(output_collisions, dev_output_collisions, sizeof(bool) * num_particles * num_particles, cudaMemcpyDeviceToHost)); *comp_time = float( clock () - begin_time ) / CLOCKS_PER_SEC; cudaFree(dev_x_mean); cudaFree(dev_y_mean); cudaFree(dev_x_var); cudaFree(dev_y_var); } // void detect_collisions_GPU_optimized2(Particle * particles, // bool * output_collisions, // unsigned int num_particles, // float * comp_time) { // }
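// Editor's note (hedged): the thrust::sort(thrust::device, ...) call above requires a strict
// weak ordering for ParticleBound, presumably an operator< supplied by collision_solve_device.cuh.
// If none exists, an explicit comparator ordering bounds by their sweep position could be passed
// instead; the functor name is illustrative, the float member `position` is taken from the
// kernels above:
struct BoundByPosition {
    __host__ __device__ bool operator()(const ParticleBound &a, const ParticleBound &b) const {
        return a.position < b.position;
    }
};
// usage: thrust::sort(thrust::device, chosen_bounds, chosen_bounds + num_particles * 2,
//                     BoundByPosition());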
d6d8ee5da2e7470723425675c854eb3d062b17c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #if defined(_MSC_VER) || defined(__MINGW32__) //__MINGW32__ should goes before __GNUC__ #define JL_SIZE_T_SPECIFIER "%Iu" #define JL_SSIZE_T_SPECIFIER "%Id" #define JL_PTRDIFF_T_SPECIFIER "%Id" #elif defined(__GNUC__) #define JL_SIZE_T_SPECIFIER "%zu" #define JL_SSIZE_T_SPECIFIER "%zd" #define JL_PTRDIFF_T_SPECIFIER "%zd" #else // TODO figure out which to use. #if NUMBITS == 32 #define JL_SIZE_T_SPECIFIER something_unsigned #define JL_SSIZE_T_SPECIFIER something_signed #define JL_PTRDIFF_T_SPECIFIER something_signed #else #define JL_SIZE_T_SPECIFIER something_bigger_unsigned #define JL_SSIZE_T_SPECIFIER something_bigger_signed #define JL_PTRDIFF_T_SPECIFIER something-bigger_signed #endif #endif /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ hipError_t _m_cudaStat = value; \ if (_m_cudaStat != hipSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } __device__ unsigned int deviceFun(unsigned int number) { return number; } /** * CUDA kernel function that reverses the order of bits in each element of the array. */ __global__ void cudaFun(void *data) { unsigned int *idata = (unsigned int*) data; idata[threadIdx.x] = deviceFun(idata[threadIdx.x]); } /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main2(void) { printf("Program begin..."); int deviceCount; hipGetDeviceCount(&deviceCount); printf("\nDevice count: %i", deviceCount); for (int dev = 0; dev < deviceCount; dev++) { hipDeviceProp_t deviceProp; CUDA_CHECK_RETURN(hipGetDeviceProperties(&deviceProp, dev)); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { printf("\nNo CUDA GPU has been detected"); return -1; } else if (deviceCount == 1) { //@@ WbLog is a provided logging API (similar to Log4J). //@@ The logging function wbLog takes a level which is either //@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG, or TRACE and a //@@ message to be printed. printf("\nThere is 1 device supporting CUDA"); } else { printf("\nThere are %i devices supporting CUDA", deviceCount); } } printf("\nDevice %i name: %s", dev, deviceProp.name); printf("\nComputational Capabilities: %i.%i",deviceProp.major,deviceProp.minor); printf("\nMaximum global memory size: "JL_SIZE_T_SPECIFIER, deviceProp.totalGlobalMem); printf("\nMaximum constant memory size: "JL_SIZE_T_SPECIFIER, deviceProp.totalConstMem); printf("\nMaximum shared memory size per block: %i",deviceProp.sharedMemPerBlock); printf("\nMaximum threads per block: %i",deviceProp.maxThreadsPerBlock); printf("\nMaximum block dimensions: %ix%ix%i", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf("\nMaximum grid dimensions: %ix%ix%i", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf("\nWarp size: %i",deviceProp.warpSize); } printf("\nProgram end..."); return 0; }
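// Editor's note (hedged): sharedMemPerBlock, like totalGlobalMem and totalConstMem, is a size_t
// in the device-properties struct, so printing it with "%i" is a format-specifier mismatch. The
// JL_SIZE_T_SPECIFIER macro defined at the top of this file already exists for that purpose:
printf("\nMaximum shared memory size per block: " JL_SIZE_T_SPECIFIER, deviceProp.sharedMemPerBlock);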
d6d8ee5da2e7470723425675c854eb3d062b17c5.cu
#include <stdio.h> #include <stdlib.h> #if defined(_MSC_VER) || defined(__MINGW32__) //__MINGW32__ should goes before __GNUC__ #define JL_SIZE_T_SPECIFIER "%Iu" #define JL_SSIZE_T_SPECIFIER "%Id" #define JL_PTRDIFF_T_SPECIFIER "%Id" #elif defined(__GNUC__) #define JL_SIZE_T_SPECIFIER "%zu" #define JL_SSIZE_T_SPECIFIER "%zd" #define JL_PTRDIFF_T_SPECIFIER "%zd" #else // TODO figure out which to use. #if NUMBITS == 32 #define JL_SIZE_T_SPECIFIER something_unsigned #define JL_SSIZE_T_SPECIFIER something_signed #define JL_PTRDIFF_T_SPECIFIER something_signed #else #define JL_SIZE_T_SPECIFIER something_bigger_unsigned #define JL_SSIZE_T_SPECIFIER something_bigger_signed #define JL_PTRDIFF_T_SPECIFIER something-bigger_signed #endif #endif /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ cudaError_t _m_cudaStat = value; \ if (_m_cudaStat != cudaSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } __device__ unsigned int deviceFun(unsigned int number) { return number; } /** * CUDA kernel function that reverses the order of bits in each element of the array. */ __global__ void cudaFun(void *data) { unsigned int *idata = (unsigned int*) data; idata[threadIdx.x] = deviceFun(idata[threadIdx.x]); } /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main2(void) { printf("Program begin..."); int deviceCount; cudaGetDeviceCount(&deviceCount); printf("\nDevice count: %i", deviceCount); for (int dev = 0; dev < deviceCount; dev++) { cudaDeviceProp deviceProp; CUDA_CHECK_RETURN(cudaGetDeviceProperties(&deviceProp, dev)); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { printf("\nNo CUDA GPU has been detected"); return -1; } else if (deviceCount == 1) { //@@ WbLog is a provided logging API (similar to Log4J). //@@ The logging function wbLog takes a level which is either //@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG, or TRACE and a //@@ message to be printed. printf("\nThere is 1 device supporting CUDA"); } else { printf("\nThere are %i devices supporting CUDA", deviceCount); } } printf("\nDevice %i name: %s", dev, deviceProp.name); printf("\nComputational Capabilities: %i.%i",deviceProp.major,deviceProp.minor); printf("\nMaximum global memory size: "JL_SIZE_T_SPECIFIER, deviceProp.totalGlobalMem); printf("\nMaximum constant memory size: "JL_SIZE_T_SPECIFIER, deviceProp.totalConstMem); printf("\nMaximum shared memory size per block: %i",deviceProp.sharedMemPerBlock); printf("\nMaximum threads per block: %i",deviceProp.maxThreadsPerBlock); printf("\nMaximum block dimensions: %ix%ix%i", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf("\nMaximum grid dimensions: %ix%ix%i", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf("\nWarp size: %i",deviceProp.warpSize); } printf("\nProgram end..."); return 0; }
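// Editor's note (illustrative sketch, not from the original file): cudaFun above is defined but
// never launched by main2, and despite its doc comment it currently just copies each element
// through deviceFun. A minimal driver showing how it could be exercised with the existing
// CUDA_CHECK_RETURN macro; the function name, N, and d_data are assumptions:
static void run_cudaFun_example(void) {
    const int N = 64;
    unsigned int h_data[N];
    for (int i = 0; i < N; ++i) h_data[i] = (unsigned int)i;

    void *d_data = NULL;
    CUDA_CHECK_RETURN(cudaMalloc(&d_data, N * sizeof(unsigned int)));
    CUDA_CHECK_RETURN(cudaMemcpy(d_data, h_data, N * sizeof(unsigned int), cudaMemcpyHostToDevice));

    cudaFun<<<1, N>>>(d_data);                 // one block of N threads, one element per thread
    CUDA_CHECK_RETURN(cudaGetLastError());

    CUDA_CHECK_RETURN(cudaMemcpy(h_data, d_data, N * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(cudaFree(d_data));
}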
da20ad060b4b53d012bb387bd95b12d2d9498bd6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) // Compute C = A * B __global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here int Row=blockIdx.y*blockDim.y+threadIdx.y; int Col=blockIdx.x*blockDim.x+threadIdx.x; if((Row<numARows)&&(Col<numBColumns)) { float Cvalue=0.0; for(int i=0;i<numAColumns;i++) Cvalue+=A[Row*numAColumns+i]*B[Col+i*numBColumns]; C[Row*numCColumns+Col]=Cvalue; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = ( float * )wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = ( float * )wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix wbTime_stop(Generic, "Importing data and creating memory on host"); hostC = ( float *)malloc(numCRows * numCColumns * sizeof(float)); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here hipMalloc((void **)&deviceA,numARows*numAColumns*sizeof(float)); hipMalloc((void **)&deviceB,numBRows*numBColumns*sizeof(float)); hipMalloc((void **)&deviceC,numCRows*numCColumns*sizeof(float)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here hipMemcpy(deviceA,hostA,numARows*numAColumns*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(deviceB,hostB,numBRows*numBColumns*sizeof(float),hipMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here //dim3 DimGrid(1,1,1); //dim3 DimBlock(numCRows,numCColumns,1); dim3 DimGrid((numBColumns-1)/16+1,(numARows-1)/16+1,1); dim3 DimBlock(16,16,1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( matrixMultiply), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceA,deviceB,deviceC,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here hipMemcpy(hostC,deviceC,numCRows*numCColumns*sizeof(float),hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); wbTime_stop(GPU, 
"Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
da20ad060b4b53d012bb387bd95b12d2d9498bd6.cu
#include <wb.h> #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) // Compute C = A * B __global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here int Row=blockIdx.y*blockDim.y+threadIdx.y; int Col=blockIdx.x*blockDim.x+threadIdx.x; if((Row<numARows)&&(Col<numBColumns)) { float Cvalue=0.0; for(int i=0;i<numAColumns;i++) Cvalue+=A[Row*numAColumns+i]*B[Col+i*numBColumns]; C[Row*numCColumns+Col]=Cvalue; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = ( float * )wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = ( float * )wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix wbTime_stop(Generic, "Importing data and creating memory on host"); hostC = ( float *)malloc(numCRows * numCColumns * sizeof(float)); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here cudaMalloc((void **)&deviceA,numARows*numAColumns*sizeof(float)); cudaMalloc((void **)&deviceB,numBRows*numBColumns*sizeof(float)); cudaMalloc((void **)&deviceC,numCRows*numCColumns*sizeof(float)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here cudaMemcpy(deviceA,hostA,numARows*numAColumns*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(deviceB,hostB,numBRows*numBColumns*sizeof(float),cudaMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here //dim3 DimGrid(1,1,1); //dim3 DimBlock(numCRows,numCColumns,1); dim3 DimGrid((numBColumns-1)/16+1,(numARows-1)/16+1,1); dim3 DimBlock(16,16,1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here matrixMultiply<<<DimGrid,DimBlock>>>(deviceA,deviceB,deviceC,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here cudaMemcpy(hostC,deviceC,numCRows*numCColumns*sizeof(float),cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 
0;
}
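// Editor's note (hedged): the wbCheck macro defined at the top of this file is never used in
// main; wrapping the CUDA runtime calls with it would report failures through wbLog instead of
// failing silently. For example, inside main the unchecked allocation and copy could become:
wbCheck(cudaMalloc((void **)&deviceA, numARows * numAColumns * sizeof(float)));
wbCheck(cudaMemcpy(deviceA, hostA, numARows * numAColumns * sizeof(float), cudaMemcpyHostToDevice));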
ec18d249763826fef72744d53420edcffc8562a8.hip
// !!! This is a file automatically generated by hipify!!! #include <drivers/convolution_driver.h> #include <core/errors.h> #include <device/cuda_utils.h> #include <device/device_defines.h> #include <device/handles.h> #include <device/gen_random.h> #include <nn/read_nn.h> #include <functions/dev_initializations.h> #include <functions/eval_convolution.h> #include <functions/dev_backprop_convolution.h> #include <functions/cnn_forward.h> #include <functions/cnn_backward.h> #include <functions/cnn_hessian_vec.h> #include <utilities/print_utils.h> #include <utilities/utils.h> #include <limits.h> #include <stdlib.h> #include <stdio.h> #include <float.h> #define DATASET_SIZE 256 inline int getMaxZSizes( int *elements, int count ) { int m = elements [ 0 ] - elements[ 1 ]; for (int i = 1; i < count; i ++) if ( m < (elements[ i+1 ] - elements[ i ] )) m = elements[ i+1 ] - elements[ i ]; return m; } void initCNNDataset( DEVICE_DATASET *data, SCRATCH_AREA *scratch, int h, int w, int ch, int k, int out_ch, int numClasses ) { int height_col = ( h - k ) + 1; int width_col = ( w - k ) + 1; int points = DATASET_SIZE; ; real counter=0; real val = 1; real *host = scratch->hostWorkspace; real *dev = scratch->devWorkspace; for (int p = 0; p < points; p ++) { for (int i = 0; i < ch; i ++){ //counter = (i + 1) * 10; // + (p+1)*100; //counter = ((real)i + 1) * 0.1; // + (p+1)*100; for (int c = 0; c < w; c ++) { for (int r = 0; r < h; r ++) { //host[ i * h * w + h * c + r + p * ch * h * w ] = (counter ++) ; //host[ i * h * w * points + h * c + r + p * h * w ] = 1; //(counter ++) ; //host[ i * h * w * points + h * c + r + p * h * w ] = counter; //(counter ++) ; host[ i * h * w * points + h * c + r + p * h * w ] = 1; //val += 0.1; } } } } cuda_malloc( (void **)&data->trainSetX, sizeof(real) * ch * h * w * points, 0, ERROR_MEM_ALLOC ); copy_host_device( host, data->trainSetX, sizeof(real) * ch * h * w * points, hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); /* getRandomVector( ch * h * w * points, NULL, data->trainSetX, RAND_UNIFORM ); real alpha = 0.1; cublasCheckError( hipblasDscal( cublasHandle, ch * h * w * points, &alpha, data->trainSetX, 1 ) ); writeVector( data->trainSetX, ch * h * w * points, "./cuda_dataset.txt", 0, host); */ readVector( host, ch * h * w * points, "./cuda_dataset.txt", 0, NULL ); copy_host_device( host, data->trainSetX, sizeof(real) * ch * h * w * points, hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); for (int p = 0; p < points; p ++) host[ p ] = 1; cuda_malloc( (void **)&data->trainSetY, sizeof(real) * points, 0, ERROR_MEM_ALLOC ); copy_host_device( host, data->trainSetY, sizeof(real) * points, hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); data->trainSizeX = points; data->trainSizeY = points; data->numClasses = numClasses; } void initVector( real *host, real *devPtr, int pSize ) { for (int i = 0; i < pSize; i ++ ) host[ i ] = 0.1; /* getRandomVector( pSize, NULL, devPtr, RAND_UNIFORM ); real alpha = 0.1; cublasCheckError( hipblasDscal( cublasHandle, pSize, &alpha, devPtr, 1 ) ); writeVector( devPtr, pSize, "./cuda_weights2.txt", 0, host); */ readVector( host, pSize, "./cuda_weights2.txt", 0, NULL ); copy_host_device( host, devPtr, sizeof(real) * pSize, hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); } void initWeights( CNN_MODEL *model, DEVICE_DATASET *data, real *hostPtr ) { real *weights = data->weights; int *wOffsets = model->wOffsets; int *bOffsets = model->bOffsets; int index = 0; fprintf( stderr, " Model Parameters: %d, .... 
%d \n", model->pSize, index ); for (int i = 0; i < model->pSize; i ++ ) hostPtr[ i ] = 0.1; copy_host_device( hostPtr, weights, sizeof(real) * model->pSize, hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); /* getRandomVector( model->pSize, NULL, weights, RAND_UNIFORM ); real alpha = 0.1; cublasCheckError( hipblasDscal( cublasHandle, model->pSize, &alpha, weights, 1 ) ); writeVector( weights, model->pSize, "./cuda_weights.txt", 0, hostPtr); */ readVector( hostPtr, model->pSize, "./cuda_weights.txt", 0, NULL ); copy_host_device( hostPtr, weights, sizeof (real) * model->pSize, hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); } void printGradient( real *data, int *wOffsets, int *bOffsets, CNN_MODEL *model ) { CONV_LAYER *c = & (model->convLayer[ 0 ] ); fprintf( stderr, "W0 -- \n"); print4DMatrix( data, c->outChannels, c->inChannels, c->kSize, c->kSize ); fprintf( stderr, "b0 -- \n"); print2DMatrix( data + c->outChannels * c->inChannels * c->kSize * c->kSize, 1, c->outChannels ); c = & (model->convLayer[ 1 ] ); fprintf( stderr, "W1 -- \n"); print4DMatrix( data + wOffsets[1], c->outChannels, c->inChannels, c->kSize, c->kSize ); fprintf( stderr, "b1 -- \n"); print2DMatrix( data + bOffsets[1], 1, c->outChannels ); FC_LAYER *f = & (model->fcLayer[ 0 ] ); fprintf( stderr, "W2 -- \n"); print2DMatrix( data + wOffsets[ 2 ], f->out, f->in ); fprintf( stderr, "b2 -- \n"); print2DMatrix( data + bOffsets[ 2 ], 1, f->out ); f = & (model->fcLayer[ 1 ] ); fprintf( stderr, "W3 -- \n"); print2DMatrix( data + wOffsets[ 3 ], f->out, f->in ); fprintf( stderr, "b3 -- \n"); print2DMatrix( data + bOffsets[ 3 ], 1, f->out ); f = & (model->fcLayer[ 2 ] ); fprintf( stderr, "W4 -- \n"); print2DMatrix( data + wOffsets[ 4 ], f->out, f->in ); fprintf( stderr, "b4 -- \n"); print2DMatrix( data + bOffsets[ 4 ], 1, f->out ); } void testCNN( CNN_MODEL *model, DEVICE_DATASET *data, SCRATCH_AREA *scratch ) { real ll = 0; int NUM_CLASSES = 10; initCNNDataset( data, scratch, 32, 32, 3, 3, 6, NUM_CLASSES); fprintf( stderr, "Dataset initialized \n"); //readTestCNN( model, 1, 1, 2, 6, 6, DATASET_SIZE); //readConvCNN( model, 2, 4, NUM_CLASSES, 6, 6, DATASET_SIZE); //readConv2CNN( model, 1, 1, NUM_CLASSES, 14, 14, DATASET_SIZE ); //readConv2CNN( model, 2, 4, NUM_CLASSES, 14, 14, DATASET_SIZE ); readLenetCNN( model, 3, 32, 32, DATASET_SIZE, 1, 0 ); fprintf( stderr, "Done with Network initialization... \n"); cnnInitializations( model, data ); fprintf( stderr, "Done with weights initialization\n"); //compute the gradient here. int maxDeltaSize = getMaxZSizes (model->zOffsets, model->cLayers + model->lLayers + 1 ); fprintf( stderr, "Max Z size is : %d \n", maxDeltaSize ); real *z = scratch->nextDevPtr; real *dx = z + model->zSize; real *gradient= dx + model->zSize; real *errors = gradient + model->pSize; real *errors_1 = errors + maxDeltaSize; real *lossFuncErrors = errors_1 + maxDeltaSize; real *rz = lossFuncErrors + maxDeltaSize; real *rerror = rz + model->zSize; real *probs = rerror + maxDeltaSize; real *vector = probs + DATASET_SIZE * data->numClasses; real *hv = vector + model->pSize; real *nextDevPtr = hv + model->pSize; real *hostPtr = scratch->nextHostPtr; scratch->nextDevPtr = nextDevPtr; real start, total; //initWeights( model, data, hostPtr ); /* copy_host_device( scratch->hostWorkspace, data->weights, sizeof(real) * (model->pSize), hipMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE ); printGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model ); */ /* fprintf( stderr, "Debug dataset... 
\n"); copy_host_device( hostPtr, data->trainSetX, DATASET_SIZE * 6 * 6 * 3 * sizeof(real), hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST ); print2DMatrix( hostPtr, DATASET_SIZE * 6 * 6, 3); exit( -1 ); */ start = Get_Time( ); ll = cnnForward( model, data, scratch, z, probs, lossFuncErrors, 0, DATASET_SIZE, MODEL_TRAIN ); fprintf( stderr, "Model Error is %f \n", ll ); //fprintf( stderr, "Error Vector is ---> \n"); //printVector( errors, 10, NULL, scratch->hostWorkspace ); fprintf( stderr, "Beginning BACKWARD PASS... \n"); copy_device( errors, lossFuncErrors, sizeof(real) * maxDeltaSize, ERROR_MEMCPY_DEVICE_DEVICE ); cnnBackward( model, data, nextDevPtr, z, gradient, dx, errors, errors_1, 0, model->batchSize, scratch->nextHostPtr ); copy_host_device( scratch->hostWorkspace, gradient, sizeof(real) * (model->pSize), hipMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE ); //printGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model ); printHostVector( scratch->hostWorkspace, model->pSize, NULL ); fprintf( stderr, "Done with Gradient......... \n\n\n\n\n"); /* fprintf( stderr, "Begin with HessianVector here... \n\n\n"); initVector( scratch->hostWorkspace, vector, model->pSize ); fprintf( stderr, "Done with vector initialization... \n"); cuda_memset( hv, 0, model->pSize * sizeof(real), ERROR_MEMSET ); fprintf( stderr, "Done with hv initialization... \n"); */ //copy_host_device( scratch->hostWorkspace, vector, sizeof(real) * model->pSize, // hipMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE ); //printGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model ); /* cnnHv ( model, data, z, probs, lossFuncErrors, dx, vector, hv, 0, DATASET_SIZE, nextDevPtr, scratch->nextHostPtr ); total = Get_Timing_Info( start ); fprintf( stderr, " Time to compute one hessian vec is: %g\n\n\n", total ); */ /* copy_host_device( scratch->hostWorkspace, hv, sizeof(real) * (model->pSize), hipMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE ); printGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model ); */ //revert back here. scratch->nextDevPtr = z; }
ec18d249763826fef72744d53420edcffc8562a8.cu
#include <drivers/convolution_driver.h> #include <core/errors.h> #include <device/cuda_utils.h> #include <device/device_defines.h> #include <device/handles.h> #include <device/gen_random.h> #include <nn/read_nn.h> #include <functions/dev_initializations.h> #include <functions/eval_convolution.h> #include <functions/dev_backprop_convolution.h> #include <functions/cnn_forward.h> #include <functions/cnn_backward.h> #include <functions/cnn_hessian_vec.h> #include <utilities/print_utils.h> #include <utilities/utils.h> #include <limits.h> #include <stdlib.h> #include <stdio.h> #include <float.h> #define DATASET_SIZE 256 inline int getMaxZSizes( int *elements, int count ) { int m = elements [ 0 ] - elements[ 1 ]; for (int i = 1; i < count; i ++) if ( m < (elements[ i+1 ] - elements[ i ] )) m = elements[ i+1 ] - elements[ i ]; return m; } void initCNNDataset( DEVICE_DATASET *data, SCRATCH_AREA *scratch, int h, int w, int ch, int k, int out_ch, int numClasses ) { int height_col = ( h - k ) + 1; int width_col = ( w - k ) + 1; int points = DATASET_SIZE; ; real counter=0; real val = 1; real *host = scratch->hostWorkspace; real *dev = scratch->devWorkspace; for (int p = 0; p < points; p ++) { for (int i = 0; i < ch; i ++){ //counter = (i + 1) * 10; // + (p+1)*100; //counter = ((real)i + 1) * 0.1; // + (p+1)*100; for (int c = 0; c < w; c ++) { for (int r = 0; r < h; r ++) { //host[ i * h * w + h * c + r + p * ch * h * w ] = (counter ++) ; //host[ i * h * w * points + h * c + r + p * h * w ] = 1; //(counter ++) ; //host[ i * h * w * points + h * c + r + p * h * w ] = counter; //(counter ++) ; host[ i * h * w * points + h * c + r + p * h * w ] = 1; //val += 0.1; } } } } cuda_malloc( (void **)&data->trainSetX, sizeof(real) * ch * h * w * points, 0, ERROR_MEM_ALLOC ); copy_host_device( host, data->trainSetX, sizeof(real) * ch * h * w * points, cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); /* getRandomVector( ch * h * w * points, NULL, data->trainSetX, RAND_UNIFORM ); real alpha = 0.1; cublasCheckError( cublasDscal( cublasHandle, ch * h * w * points, &alpha, data->trainSetX, 1 ) ); writeVector( data->trainSetX, ch * h * w * points, "./cuda_dataset.txt", 0, host); */ readVector( host, ch * h * w * points, "./cuda_dataset.txt", 0, NULL ); copy_host_device( host, data->trainSetX, sizeof(real) * ch * h * w * points, cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); for (int p = 0; p < points; p ++) host[ p ] = 1; cuda_malloc( (void **)&data->trainSetY, sizeof(real) * points, 0, ERROR_MEM_ALLOC ); copy_host_device( host, data->trainSetY, sizeof(real) * points, cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); data->trainSizeX = points; data->trainSizeY = points; data->numClasses = numClasses; } void initVector( real *host, real *devPtr, int pSize ) { for (int i = 0; i < pSize; i ++ ) host[ i ] = 0.1; /* getRandomVector( pSize, NULL, devPtr, RAND_UNIFORM ); real alpha = 0.1; cublasCheckError( cublasDscal( cublasHandle, pSize, &alpha, devPtr, 1 ) ); writeVector( devPtr, pSize, "./cuda_weights2.txt", 0, host); */ readVector( host, pSize, "./cuda_weights2.txt", 0, NULL ); copy_host_device( host, devPtr, sizeof(real) * pSize, cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); } void initWeights( CNN_MODEL *model, DEVICE_DATASET *data, real *hostPtr ) { real *weights = data->weights; int *wOffsets = model->wOffsets; int *bOffsets = model->bOffsets; int index = 0; fprintf( stderr, " Model Parameters: %d, .... 
%d \n", model->pSize, index ); for (int i = 0; i < model->pSize; i ++ ) hostPtr[ i ] = 0.1; copy_host_device( hostPtr, weights, sizeof(real) * model->pSize, cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); /* getRandomVector( model->pSize, NULL, weights, RAND_UNIFORM ); real alpha = 0.1; cublasCheckError( cublasDscal( cublasHandle, model->pSize, &alpha, weights, 1 ) ); writeVector( weights, model->pSize, "./cuda_weights.txt", 0, hostPtr); */ readVector( hostPtr, model->pSize, "./cuda_weights.txt", 0, NULL ); copy_host_device( hostPtr, weights, sizeof (real) * model->pSize, cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); } void printGradient( real *data, int *wOffsets, int *bOffsets, CNN_MODEL *model ) { CONV_LAYER *c = & (model->convLayer[ 0 ] ); fprintf( stderr, "W0 -- \n"); print4DMatrix( data, c->outChannels, c->inChannels, c->kSize, c->kSize ); fprintf( stderr, "b0 -- \n"); print2DMatrix( data + c->outChannels * c->inChannels * c->kSize * c->kSize, 1, c->outChannels ); c = & (model->convLayer[ 1 ] ); fprintf( stderr, "W1 -- \n"); print4DMatrix( data + wOffsets[1], c->outChannels, c->inChannels, c->kSize, c->kSize ); fprintf( stderr, "b1 -- \n"); print2DMatrix( data + bOffsets[1], 1, c->outChannels ); FC_LAYER *f = & (model->fcLayer[ 0 ] ); fprintf( stderr, "W2 -- \n"); print2DMatrix( data + wOffsets[ 2 ], f->out, f->in ); fprintf( stderr, "b2 -- \n"); print2DMatrix( data + bOffsets[ 2 ], 1, f->out ); f = & (model->fcLayer[ 1 ] ); fprintf( stderr, "W3 -- \n"); print2DMatrix( data + wOffsets[ 3 ], f->out, f->in ); fprintf( stderr, "b3 -- \n"); print2DMatrix( data + bOffsets[ 3 ], 1, f->out ); f = & (model->fcLayer[ 2 ] ); fprintf( stderr, "W4 -- \n"); print2DMatrix( data + wOffsets[ 4 ], f->out, f->in ); fprintf( stderr, "b4 -- \n"); print2DMatrix( data + bOffsets[ 4 ], 1, f->out ); } void testCNN( CNN_MODEL *model, DEVICE_DATASET *data, SCRATCH_AREA *scratch ) { real ll = 0; int NUM_CLASSES = 10; initCNNDataset( data, scratch, 32, 32, 3, 3, 6, NUM_CLASSES); fprintf( stderr, "Dataset initialized \n"); //readTestCNN( model, 1, 1, 2, 6, 6, DATASET_SIZE); //readConvCNN( model, 2, 4, NUM_CLASSES, 6, 6, DATASET_SIZE); //readConv2CNN( model, 1, 1, NUM_CLASSES, 14, 14, DATASET_SIZE ); //readConv2CNN( model, 2, 4, NUM_CLASSES, 14, 14, DATASET_SIZE ); readLenetCNN( model, 3, 32, 32, DATASET_SIZE, 1, 0 ); fprintf( stderr, "Done with Network initialization... \n"); cnnInitializations( model, data ); fprintf( stderr, "Done with weights initialization\n"); //compute the gradient here. int maxDeltaSize = getMaxZSizes (model->zOffsets, model->cLayers + model->lLayers + 1 ); fprintf( stderr, "Max Z size is : %d \n", maxDeltaSize ); real *z = scratch->nextDevPtr; real *dx = z + model->zSize; real *gradient= dx + model->zSize; real *errors = gradient + model->pSize; real *errors_1 = errors + maxDeltaSize; real *lossFuncErrors = errors_1 + maxDeltaSize; real *rz = lossFuncErrors + maxDeltaSize; real *rerror = rz + model->zSize; real *probs = rerror + maxDeltaSize; real *vector = probs + DATASET_SIZE * data->numClasses; real *hv = vector + model->pSize; real *nextDevPtr = hv + model->pSize; real *hostPtr = scratch->nextHostPtr; scratch->nextDevPtr = nextDevPtr; real start, total; //initWeights( model, data, hostPtr ); /* copy_host_device( scratch->hostWorkspace, data->weights, sizeof(real) * (model->pSize), cudaMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE ); printGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model ); */ /* fprintf( stderr, "Debug dataset... 
\n"); copy_host_device( hostPtr, data->trainSetX, DATASET_SIZE * 6 * 6 * 3 * sizeof(real), cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST ); print2DMatrix( hostPtr, DATASET_SIZE * 6 * 6, 3); exit( -1 ); */ start = Get_Time( ); ll = cnnForward( model, data, scratch, z, probs, lossFuncErrors, 0, DATASET_SIZE, MODEL_TRAIN ); fprintf( stderr, "Model Error is %f \n", ll ); //fprintf( stderr, "Error Vector is ---> \n"); //printVector( errors, 10, NULL, scratch->hostWorkspace ); fprintf( stderr, "Beginning BACKWARD PASS... \n"); copy_device( errors, lossFuncErrors, sizeof(real) * maxDeltaSize, ERROR_MEMCPY_DEVICE_DEVICE ); cnnBackward( model, data, nextDevPtr, z, gradient, dx, errors, errors_1, 0, model->batchSize, scratch->nextHostPtr ); copy_host_device( scratch->hostWorkspace, gradient, sizeof(real) * (model->pSize), cudaMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE ); //printGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model ); printHostVector( scratch->hostWorkspace, model->pSize, NULL ); fprintf( stderr, "Done with Gradient......... \n\n\n\n\n"); /* fprintf( stderr, "Begin with HessianVector here... \n\n\n"); initVector( scratch->hostWorkspace, vector, model->pSize ); fprintf( stderr, "Done with vector initialization... \n"); cuda_memset( hv, 0, model->pSize * sizeof(real), ERROR_MEMSET ); fprintf( stderr, "Done with hv initialization... \n"); */ //copy_host_device( scratch->hostWorkspace, vector, sizeof(real) * model->pSize, // cudaMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE ); //printGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model ); /* cnnHv ( model, data, z, probs, lossFuncErrors, dx, vector, hv, 0, DATASET_SIZE, nextDevPtr, scratch->nextHostPtr ); total = Get_Timing_Info( start ); fprintf( stderr, " Time to compute one hessian vec is: %g\n\n\n", total ); */ /* copy_host_device( scratch->hostWorkspace, hv, sizeof(real) * (model->pSize), cudaMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE ); printGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model ); */ //revert back here. scratch->nextDevPtr = z; }
90a5f959596e2bec6c393c97a8f84db35fcd18ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "reference_calc.cpp" #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { int rowIndex = threadIdx.x + blockDim.x * blockIdx.x; int colIndex = threadIdx.y + blockDim.y * blockIdx.y; int index = (rowIndex * numCols) + colIndex; if (rowIndex < numRows && colIndex < numCols) { float result = 0.f; //For every value in the filter around the pixel (c, r) for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) { for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) { //Find the global image position for this filter position //clamp to boundary of the image int image_r = min(max(rowIndex + filter_r, 0), static_cast<int>(numRows - 1)); int image_c = min(max(colIndex + filter_c, 0), static_cast<int>(numCols - 1)); float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]); float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2]; result += image_value * filter_value; } } outputChannel[index] = result; } } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { int rowIndex = threadIdx.x + blockDim.x * blockIdx.x; int colIndex = threadIdx.y + blockDim.y * blockIdx.y; int index = (rowIndex * numCols) + colIndex; if (rowIndex < numRows && colIndex < numCols) { uchar4 rgba = inputImageRGBA[index]; redChannel[index] = rgba.x; greenChannel[index] = rgba.y; blueChannel[index] = rgba.z; } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { int rowIndex = threadIdx.x + blockDim.x * blockIdx.x; int colIndex = threadIdx.y + blockDim.y * blockIdx.y; int index = (rowIndex * numCols) + colIndex; if (rowIndex < numRows && colIndex < numCols) { unsigned char red = redChannel[index]; unsigned char green = greenChannel[index]; unsigned char blue = blueChannel[index]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[index] = outputPixel; } } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); int numFilterBytes = sizeof(float) * filterWidth * filterWidth; checkCudaErrors(hipMalloc(&d_filter, numFilterBytes)); checkCudaErrors(hipMemcpy(d_filter, h_filter, numFilterBytes, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { int threadLimit = 512; int blockWidth = trunc(sqrt(threadLimit)); int blockHeight = trunc(sqrt(threadLimit)); int numBlockRows = numRows / blockWidth + (numRows % blockWidth != 0); int numBlockCols = numCols / blockHeight + (numCols % blockHeight != 0); const dim3 blockSize(blockWidth, blockHeight, 1); const dim3 gridSize(numBlockRows, numBlockCols, 1); hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred,numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred,numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred,numRows, numCols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter)); }
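The header comments in the homework file above lean on a checkCudaErrors helper supplied by utils.h, which is not shown here. A minimal sketch of what such a wrapper typically looks like follows; the macro and helper names are assumptions for illustration, not the course's actual utils.h:

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Hedged sketch of an error-checking wrapper similar to what utils.h is assumed to provide.
#define checkCudaErrors(call) checkCudaCall((call), #call, __FILE__, __LINE__)

inline void checkCudaCall(cudaError_t err, const char *expr, const char *file, int line)
{
    if (err != cudaSuccess) {
        // Report which call failed and where, then abort so later kernels don't run on bad state.
        fprintf(stderr, "CUDA error '%s' at %s:%d in '%s'\n",
                cudaGetErrorString(err), file, line, expr);
        exit(EXIT_FAILURE);
    }
}

Wrapping allocation and copy calls this way makes a failed cudaMalloc abort with a file and line number instead of letting subsequent kernels silently run on bad pointers.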
90a5f959596e2bec6c393c97a8f84db35fcd18ab.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "reference_calc.cpp" #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { int rowIndex = threadIdx.x + blockDim.x * blockIdx.x; int colIndex = threadIdx.y + blockDim.y * blockIdx.y; int index = (rowIndex * numCols) + colIndex; if (rowIndex < numRows && colIndex < numCols) { float result = 0.f; //For every value in the filter around the pixel (c, r) for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) { for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) { //Find the global image position for this filter position //clamp to boundary of the image int image_r = min(max(rowIndex + filter_r, 0), static_cast<int>(numRows - 1)); int image_c = min(max(colIndex + filter_c, 0), static_cast<int>(numCols - 1)); float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]); float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2]; result += image_value * filter_value; } } outputChannel[index] = result; } } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { int rowIndex = threadIdx.x + blockDim.x * blockIdx.x; int colIndex = threadIdx.y + blockDim.y * blockIdx.y; int index = (rowIndex * numCols) + colIndex; if (rowIndex < numRows && colIndex < numCols) { uchar4 rgba = inputImageRGBA[index]; redChannel[index] = rgba.x; greenChannel[index] = rgba.y; blueChannel[index] = rgba.z; } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { int rowIndex = threadIdx.x + blockDim.x * blockIdx.x; int colIndex = threadIdx.y + blockDim.y * blockIdx.y; int index = (rowIndex * numCols) + colIndex; if (rowIndex < numRows && colIndex < numCols) { unsigned char red = redChannel[index]; unsigned char green = greenChannel[index]; unsigned char blue = blueChannel[index]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[index] = outputPixel; } } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); int numFilterBytes = sizeof(float) * filterWidth * filterWidth; checkCudaErrors(cudaMalloc(&d_filter, numFilterBytes)); checkCudaErrors(cudaMemcpy(d_filter, h_filter, numFilterBytes, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { int threadLimit = 512; int blockWidth = trunc(sqrt(threadLimit)); int blockHeight = trunc(sqrt(threadLimit)); int numBlockRows = numRows / blockWidth + (numRows % blockWidth != 0); int numBlockCols = numCols / blockHeight + (numCols % blockHeight != 0); const dim3 blockSize(blockWidth, blockHeight, 1); const dim3 gridSize(numBlockRows, numBlockCols, 1); separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred,numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred,numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred,numRows, numCols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); checkCudaErrors(cudaFree(d_filter)); }
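The homework text above suggests, as a follow-up optimization, using shared memory so that threads in a block cooperate on the repeated neighbourhood reads. A sketch of that variant is given below; TILE, MAX_RADIUS and the name gaussian_blur_shared are assumptions, the block is expected to be launched as dim3(TILE, TILE), and the filter radius filterWidth/2 must not exceed MAX_RADIUS:

#define TILE 16
#define MAX_RADIUS 4

__global__ void gaussian_blur_shared(const unsigned char* const inputChannel,
                                     unsigned char* const outputChannel,
                                     int numRows, int numCols,
                                     const float* const filter, const int filterWidth)
{
  __shared__ float tile[TILE + 2 * MAX_RADIUS][TILE + 2 * MAX_RADIUS];

  const int r        = filterWidth / 2;     // filter radius, assumed <= MAX_RADIUS
  const int rowStart = blockIdx.y * TILE;   // first output row handled by this block
  const int colStart = blockIdx.x * TILE;   // first output column handled by this block

  // Cooperative load of the tile plus its halo, clamping reads to the image boundary
  // exactly like the global-memory kernel above does.
  for (int ty = threadIdx.y; ty < TILE + 2 * r; ty += blockDim.y) {
    for (int tx = threadIdx.x; tx < TILE + 2 * r; tx += blockDim.x) {
      int ir = min(max(rowStart + ty - r, 0), numRows - 1);
      int ic = min(max(colStart + tx - r, 0), numCols - 1);
      tile[ty][tx] = static_cast<float>(inputChannel[ir * numCols + ic]);
    }
  }
  __syncthreads();

  const int row = rowStart + threadIdx.y;
  const int col = colStart + threadIdx.x;
  if (row < numRows && col < numCols) {
    float result = 0.f;
    for (int fr = -r; fr <= r; ++fr)
      for (int fc = -r; fc <= r; ++fc)
        result += tile[threadIdx.y + r + fr][threadIdx.x + r + fc]
                  * filter[(fr + r) * filterWidth + (fc + r)];
    outputChannel[row * numCols + col] = result;
  }
}

The arithmetic is the same weighted average as in the gaussian_blur kernel above; the tile only changes where the neighbourhood reads come from.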
6752e54f714e0e74ef254fa87302688a2d24b0a1.hip
// !!! This is a file automatically generated by hipify!!! #include <private/machine.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include "include/machine_base.h" void createMachine(machine_t * machine, std::map<std::string, std::string> elements){ machine->base.avaliable_time = std::stoi(elements["RECOVER_TIME"]); machine->base.machine_no = atoi(&elements["EQP_ID"].c_str()[3]); } __device__ __host__ void initMachine(machine_t *machine){ machine_base_init(&machine->base); machine->base.ptr_derived_object = machine; }
6752e54f714e0e74ef254fa87302688a2d24b0a1.cu
#include <private/machine.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include "include/machine_base.h" void createMachine(machine_t * machine, std::map<std::string, std::string> elements){ machine->base.avaliable_time = std::stoi(elements["RECOVER_TIME"]); machine->base.machine_no = atoi(&elements["EQP_ID"].c_str()[3]); } __device__ __host__ void initMachine(machine_t *machine){ machine_base_init(&machine->base); machine->base.ptr_derived_object = machine; }
cfd649a376d31f3b4eb465947accece01689b865.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int nElem = 1024;

    dim3 block(1024);
    dim3 grid((nElem + block.x - 1) / block.x);
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    block.x = 512;
    grid.x = (nElem + block.x - 1) / block.x;
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    block.x = 256;
    grid.x = (nElem + block.x - 1) / block.x;
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    block.x = 128;
    grid.x = (nElem + block.x - 1) / block.x;
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    hipDeviceReset();
    return 0;
}
cfd649a376d31f3b4eb465947accece01689b865.cu
#include <cuda_runtime.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int nElem = 1024;

    dim3 block(1024);
    dim3 grid((nElem + block.x - 1) / block.x);
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    block.x = 512;
    grid.x = (nElem + block.x - 1) / block.x;
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    block.x = 256;
    grid.x = (nElem + block.x - 1) / block.x;
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    block.x = 128;
    grid.x = (nElem + block.x - 1) / block.x;
    printf("grid.x %d block.x %d \n", grid.x, block.x);

    cudaDeviceReset();
    return 0;
}
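The two files above only print candidate grid sizes. A short sketch of how the same integer ceiling division is normally used to size a real launch follows; divUp and fillKernel are illustrative names, not part of the example above:

#include <cuda_runtime.h>

__host__ __device__ inline int divUp(int n, int block) { return (n + block - 1) / block; }

__global__ void fillKernel(float *data, int n, float value)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)              // guard: the last block may extend past n
        data[i] = value;
}

// Usage sketch:
//   dim3 block(256);
//   dim3 grid(divUp(nElem, block.x));
//   fillKernel<<<grid, block>>>(d_data, nElem, 1.0f);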
c1663193f6ef448a30a1668b6669201c3fa9d7f3.hip
// !!! This is a file automatically generated by hipify!!! #include "cameras.cu" #include "light_transport_common.cuh" namespace VLR { // Context-scope Variables rtDeclareVariable(optix::uint2, pv_imageSize, , ); rtDeclareVariable(uint32_t, pv_numAccumFrames, , ); rtDeclareVariable(ProgSigSampleLensPosition, pv_progSampleLensPosition, , ); rtDeclareVariable(ProgSigSampleIDF, pv_progSampleIDF, , ); rtBuffer<KernelRNG, 2> pv_rngBuffer; rtBuffer<SpectrumStorage, 2> pv_outputBuffer; //rtBuffer<RGBSpectrum, 2> pv_outputNormalBuffer; //rtBuffer<RGBSpectrum, 2> pv_outputAlbedoBuffer; #define CLAMPINTENSITY(c, clampValue) const float v=std::fmax(c.r,std::fmax(c.g, c.b)); \ if(v>clampValue){const float m=clampValue/v;c.r*=m; \ c.g*=m;c.b*=m; /* don't touch w */ } // Common Closest Hit Program for All Primitive Types and Materials RT_PROGRAM void pathTracingIteration() { // first hit is an object if (sm_payload.contribution.a == -1.0f) sm_payload.contribution.a = 1.f; KernelRNG &rng = sm_payload.rng; WavelengthSamples &wls = sm_payload.wls; SurfacePoint surfPt; float hypAreaPDF; calcSurfacePoint(&surfPt, &hypAreaPDF); const SurfaceMaterialDescriptor matDesc = pv_materialDescriptorBuffer[pv_materialIndex]; BSDF bsdf(matDesc, surfPt, wls); EDF edf(matDesc, surfPt, wls); Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-asVector3D(sm_ray.direction)); // implicit light sampling SampledSpectrum spEmittance = edf.evaluateEmittance(); if (spEmittance.hasNonZero()) { SampledSpectrum Le = spEmittance * edf.evaluate(EDFQuery(), dirOutLocal); float MISWeight = 1.0f; if (!sm_payload.prevSampledType.isDelta() && sm_ray.ray_type != RayType::Primary) { float bsdfPDF = sm_payload.prevDirPDF; float dist2 = surfPt.calcSquaredDistance(asPoint3D(sm_ray.origin)); float lightPDF = pv_importance / getSumLightImportances() * hypAreaPDF * dist2 / ::fabs(dirOutLocal.z); MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF); } sm_payload.contribution += sm_payload.alpha * Le * MISWeight; CLAMPINTENSITY(sm_payload.contribution, 10.f); } if (surfPt.atInfinity || sm_payload.maxLengthTerminate) return; // Russian roulette float continueProb = std::fmin(sm_payload.alpha.importance(wls.selectedLambdaIndex()) / sm_payload.initImportance, 1.0f); if (rng.getFloat0cTo1o() >= continueProb) return; sm_payload.alpha /= continueProb; Normal3D geomNormalLocal = surfPt.shadingFrame.toLocal(surfPt.geometricNormal); BSDFQuery fsQuery(dirOutLocal, geomNormalLocal, DirectionType::All(), wls); // get base color for denoiser /* if (sm_payload.contribution.a == -1.0f) { sm_payload.contribution.a = 1.f; /* const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[matDesc.bsdfProcedureSetIndex]; auto progGetBaseColor = (ProgSigBSDFGetBaseColor)procSet.progGetBaseColor; sm_payload.albedo = progGetBaseColor((const uint32_t *)&bsdf); // sm_payload.normal = RGBSpectrum(surfPt.geometricNormal.x, surfPt.geometricNormal.y, surfPt.geometricNormal.z); // sm_payload.normal = RGBSpectrum(geomNormalLocal.x, geomNormalLocal.y, geomNormalLocal.z); /* auto rotMat = Matrix4x4(pv_perspectiveCamera.orientation.toMatrix3x3()); //rotMat = rotateY(1.57f) * rotMat; rotMat = translate(pv_perspectiveCamera.position.x, pv_perspectiveCamera.position.y, pv_perspectiveCamera.position.z) * rotMat; rotMat = invert(rotMat); auto normalCam = normalize(rotMat * surfPt.geometricNormal); sm_payload.normal = RGBSpectrum(-normalCam.x, normalCam.y, -normalCam.z); */// } // Next Event Estimation (explicit light sampling) if (bsdf.hasNonDelta()) { SurfaceLight 
light; float lightProb; float uPrim; selectSurfaceLight(rng.getFloat0cTo1o(), &light, &lightProb, &uPrim); SurfaceLightPosSample lpSample(uPrim, rng.getFloat0cTo1o(), rng.getFloat0cTo1o()); SurfaceLightPosQueryResult lpResult; light.sample(lpSample, &lpResult); const SurfaceMaterialDescriptor lightMatDesc = pv_materialDescriptorBuffer[lpResult.materialIndex]; EDF ledf(lightMatDesc, lpResult.surfPt, wls); SampledSpectrum M = ledf.evaluateEmittance(); Vector3D shadowRayDir; float squaredDistance; float fractionalVisibility; SampledSpectrum shadow_color; if (M.hasNonZero() && testVisibility(surfPt, lpResult.surfPt, &shadowRayDir, &squaredDistance, &fractionalVisibility, &shadow_color)) { Vector3D shadowRayDir_l = lpResult.surfPt.toLocal(-shadowRayDir); Vector3D shadowRayDir_sn = surfPt.toLocal(shadowRayDir); SampledSpectrum Le = M * ledf.evaluate(EDFQuery(), shadowRayDir_l) *shadow_color; float lightPDF = lightProb * lpResult.areaPDF; SampledSpectrum fs = bsdf.evaluate(fsQuery, shadowRayDir_sn); float cosLight = lpResult.surfPt.calcCosTerm(-shadowRayDir); float bsdfPDF = bsdf.evaluatePDF(fsQuery, shadowRayDir_sn) * cosLight / squaredDistance; float MISWeight = 1.0f; if (!lpResult.posType.isDelta() && !std::isinf(lightPDF)) MISWeight = (lightPDF * lightPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF); float G = fractionalVisibility * absDot(shadowRayDir_sn, geomNormalLocal) * cosLight / squaredDistance; float scalarCoeff = G * MISWeight / lightPDF; // Putting this directly into the contribution expression gives odd results, possibly due to a CUDA bug. sm_payload.contribution += sm_payload.alpha * Le * fs * scalarCoeff; CLAMPINTENSITY(sm_payload.contribution, 100.f); } } BSDFSample sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), rng.getFloat0cTo1o()); BSDFQueryResult fsResult; SampledSpectrum fs = bsdf.sample(fsQuery, sample, &fsResult); if (fs == SampledSpectrum::Zero() || fsResult.dirPDF == 0.0f) return; if (fsResult.sampledType.isDispersive() && !wls.singleIsSelected()) { fsResult.dirPDF /= SampledSpectrum::NumComponents(); wls.setSingleIsSelected(); } float cosFactor = dot(fsResult.dirLocal, geomNormalLocal); sm_payload.alpha *= fs * (::fabs(cosFactor) / fsResult.dirPDF); // CLAMPINTENSITY(sm_payload.alpha); Vector3D dirIn = surfPt.fromLocal(fsResult.dirLocal); sm_payload.origin = offsetRayOrigin(surfPt.position, cosFactor > 0.0f ?
surfPt.geometricNormal : -surfPt.geometricNormal); sm_payload.direction = dirIn; sm_payload.prevDirPDF = fsResult.dirPDF; sm_payload.prevSampledType = fsResult.sampledType; sm_payload.terminate = false; } // JP: Ideally the environment light would also be handled uniformly in the Closest Hit Program, using an Intersection/Bounding Box Program for an infinitely large sphere. // However, OptiX's BVH builder is LBVH-based, so generating an infinite AABB is risky. // The environment light is therefore handled in the Miss Program instead. RT_PROGRAM void pathTracingMiss() { // first hit is the background, set alpha to 0 if (sm_payload.contribution.a == -1.0f) { // sm_payload.albedo = spEmittance; sm_payload.contribution.a = 0.f; } if (pv_envLightDescriptor.importance == 0) return; Vector3D direction = asVector3D(sm_ray.direction); float phi, theta; direction.toPolarYUp(&theta, &phi); float sinPhi, cosPhi; VLR::sincos(phi, &sinPhi, &cosPhi); Vector3D texCoord0Dir = normalize(Vector3D(-cosPhi, 0.0f, -sinPhi)); ReferenceFrame shadingFrame; shadingFrame.x = texCoord0Dir; shadingFrame.z = -direction; shadingFrame.y = cross(shadingFrame.z, shadingFrame.x); SurfacePoint surfPt; surfPt.position = Point3D(direction.x, direction.y, direction.z); surfPt.shadingFrame = shadingFrame; surfPt.isPoint = false; surfPt.atInfinity = true; surfPt.geometricNormal = -direction; surfPt.u = phi; surfPt.v = theta; phi += pv_envLightDescriptor.body.asInfSphere.rotationPhi; phi = phi - ::floor(phi / (2 * M_PIf)) * 2 * M_PIf; surfPt.texCoord = TexCoord2D(phi / (2 * M_PIf), theta / M_PIf); float hypAreaPDF = evaluateEnvironmentAreaPDF(phi, theta); const SurfaceMaterialDescriptor matDesc = pv_materialDescriptorBuffer[pv_envLightDescriptor.materialIndex]; EDF edf(matDesc, surfPt, sm_payload.wls); Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-asVector3D(sm_ray.direction)); // implicit light sampling SampledSpectrum spEmittance = edf.evaluateEmittance(); if (spEmittance.hasNonZero()) { SampledSpectrum Le = spEmittance * edf.evaluate(EDFQuery(), dirOutLocal); float MISWeight = 1.0f; if (!sm_payload.prevSampledType.isDelta() && sm_ray.ray_type != RayType::Primary) { float bsdfPDF = sm_payload.prevDirPDF; float dist2 = surfPt.calcSquaredDistance(asPoint3D(sm_ray.origin)); float lightPDF = pv_envLightDescriptor.importance / getSumLightImportances() * hypAreaPDF * dist2 / ::fabs(dirOutLocal.z); MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF); } sm_payload.contribution += sm_payload.alpha * Le * MISWeight; CLAMPINTENSITY(sm_payload.contribution, 100.f); } } // Common Ray Generation Program for All Camera Types RT_PROGRAM void pathTracing() { KernelRNG rng = pv_rngBuffer[sm_launchIndex]; optix::float2 p = make_float2(sm_launchIndex.x + rng.getFloat0cTo1o(), sm_launchIndex.y + rng.getFloat0cTo1o()); float selectWLPDF; WavelengthSamples wls = WavelengthSamples::createWithEqualOffsets(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), &selectWLPDF); LensPosSample We0Sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o()); LensPosQueryResult We0Result; SampledSpectrum We0 = pv_progSampleLensPosition(wls, We0Sample, &We0Result); IDFSample We1Sample(p.x / pv_imageSize.x, p.y / pv_imageSize.y); IDFQueryResult We1Result; SampledSpectrum We1 = pv_progSampleIDF(We0Result.surfPt, wls, We1Sample, &We1Result); Vector3D rayDir = We0Result.surfPt.fromLocal(We1Result.dirLocal); SampledSpectrum alpha = (We0 * We1) * (We0Result.surfPt.calcCosTerm(rayDir) / (We0Result.areaPDF * We1Result.dirPDF * selectWLPDF)); optix::Ray ray = optix::make_Ray(asOptiXType(We0Result.surfPt.position), asOptiXType(rayDir), RayType::Primary, 0.0f, FLT_MAX); Payload payload; payload.maxLengthTerminate = false; payload.rng = rng; payload.initImportance =
alpha.importance(wls.selectedLambdaIndex()); payload.wls = wls; payload.alpha = alpha; payload.contribution = SampledSpectrum::Zero(); payload.contribution.a = -1.0f; //payload.normal = SampledSpectrum(0.0, 1.0, 0.0); //payload.albedo = SampledSpectrum(-1.f, -1.f, -1.f); const uint32_t MaxPathLength = 25; uint32_t pathLength = 0; while (true) { payload.terminate = true; ++pathLength; if (pathLength >= MaxPathLength) payload.maxLengthTerminate = true; rtTrace(pv_topGroup, ray, payload); if (payload.terminate) break; VLRAssert(pathLength < MaxPathLength, "Path should be terminated... Something went wrong..."); ray = optix::make_Ray(asOptiXType(payload.origin), asOptiXType(payload.direction), RayType::Scattered, 0.0f, FLT_MAX); } pv_rngBuffer[sm_launchIndex] = payload.rng; if (!payload.contribution.allFinite()) { // vlrprintf("Pass %u, (%u, %u): Not a finite value.\n", pv_numAccumFrames, sm_launchIndex.x, sm_launchIndex.y); return; } if (pv_numAccumFrames == 1) { pv_outputBuffer[sm_launchIndex].reset(); // pv_outputNormalBuffer[sm_launchIndex] = payload.normal; // pv_outputAlbedoBuffer[sm_launchIndex] = payload.albedo; } //CLAMPINTENSITY(payload.contribution); pv_outputBuffer[sm_launchIndex].add(wls, payload.contribution); } // Exception Program RT_PROGRAM void exception() { //uint32_t code = rtGetExceptionCode(); rtPrintExceptionDetails(); } }
c1663193f6ef448a30a1668b6669201c3fa9d7f3.cu
#include "cameras.cu" #include "light_transport_common.cuh" namespace VLR { // Context-scope Variables rtDeclareVariable(optix::uint2, pv_imageSize, , ); rtDeclareVariable(uint32_t, pv_numAccumFrames, , ); rtDeclareVariable(ProgSigSampleLensPosition, pv_progSampleLensPosition, , ); rtDeclareVariable(ProgSigSampleIDF, pv_progSampleIDF, , ); rtBuffer<KernelRNG, 2> pv_rngBuffer; rtBuffer<SpectrumStorage, 2> pv_outputBuffer; //rtBuffer<RGBSpectrum, 2> pv_outputNormalBuffer; //rtBuffer<RGBSpectrum, 2> pv_outputAlbedoBuffer; #define CLAMPINTENSITY(c, clampValue) const float v=std::fmax(c.r,std::fmax(c.g, c.b)); \ if(v>clampValue){const float m=clampValue/v;c.r*=m; \ c.g*=m;c.b*=m; /* don't touch w */ } // Common Closest Hit Program for All Primitive Types and Materials RT_PROGRAM void pathTracingIteration() { // first hit is an object if (sm_payload.contribution.a == -1.0f) sm_payload.contribution.a = 1.f; KernelRNG &rng = sm_payload.rng; WavelengthSamples &wls = sm_payload.wls; SurfacePoint surfPt; float hypAreaPDF; calcSurfacePoint(&surfPt, &hypAreaPDF); const SurfaceMaterialDescriptor matDesc = pv_materialDescriptorBuffer[pv_materialIndex]; BSDF bsdf(matDesc, surfPt, wls); EDF edf(matDesc, surfPt, wls); Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-asVector3D(sm_ray.direction)); // implicit light sampling SampledSpectrum spEmittance = edf.evaluateEmittance(); if (spEmittance.hasNonZero()) { SampledSpectrum Le = spEmittance * edf.evaluate(EDFQuery(), dirOutLocal); float MISWeight = 1.0f; if (!sm_payload.prevSampledType.isDelta() && sm_ray.ray_type != RayType::Primary) { float bsdfPDF = sm_payload.prevDirPDF; float dist2 = surfPt.calcSquaredDistance(asPoint3D(sm_ray.origin)); float lightPDF = pv_importance / getSumLightImportances() * hypAreaPDF * dist2 / std::fabs(dirOutLocal.z); MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF); } sm_payload.contribution += sm_payload.alpha * Le * MISWeight; CLAMPINTENSITY(sm_payload.contribution, 10.f); } if (surfPt.atInfinity || sm_payload.maxLengthTerminate) return; // Russian roulette float continueProb = std::fmin(sm_payload.alpha.importance(wls.selectedLambdaIndex()) / sm_payload.initImportance, 1.0f); if (rng.getFloat0cTo1o() >= continueProb) return; sm_payload.alpha /= continueProb; Normal3D geomNormalLocal = surfPt.shadingFrame.toLocal(surfPt.geometricNormal); BSDFQuery fsQuery(dirOutLocal, geomNormalLocal, DirectionType::All(), wls); // get base color for denoiser /* if (sm_payload.contribution.a == -1.0f) { sm_payload.contribution.a = 1.f; /* const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[matDesc.bsdfProcedureSetIndex]; auto progGetBaseColor = (ProgSigBSDFGetBaseColor)procSet.progGetBaseColor; sm_payload.albedo = progGetBaseColor((const uint32_t *)&bsdf); // sm_payload.normal = RGBSpectrum(surfPt.geometricNormal.x, surfPt.geometricNormal.y, surfPt.geometricNormal.z); // sm_payload.normal = RGBSpectrum(geomNormalLocal.x, geomNormalLocal.y, geomNormalLocal.z); /* auto rotMat = Matrix4x4(pv_perspectiveCamera.orientation.toMatrix3x3()); //rotMat = rotateY(1.57f) * rotMat; rotMat = translate(pv_perspectiveCamera.position.x, pv_perspectiveCamera.position.y, pv_perspectiveCamera.position.z) * rotMat; rotMat = invert(rotMat); auto normalCam = normalize(rotMat * surfPt.geometricNormal); sm_payload.normal = RGBSpectrum(-normalCam.x, normalCam.y, -normalCam.z); */// } // Next Event Estimation (explicit light sampling) if (bsdf.hasNonDelta()) { SurfaceLight light; float lightProb; float uPrim; 
selectSurfaceLight(rng.getFloat0cTo1o(), &light, &lightProb, &uPrim); SurfaceLightPosSample lpSample(uPrim, rng.getFloat0cTo1o(), rng.getFloat0cTo1o()); SurfaceLightPosQueryResult lpResult; light.sample(lpSample, &lpResult); const SurfaceMaterialDescriptor lightMatDesc = pv_materialDescriptorBuffer[lpResult.materialIndex]; EDF ledf(lightMatDesc, lpResult.surfPt, wls); SampledSpectrum M = ledf.evaluateEmittance(); Vector3D shadowRayDir; float squaredDistance; float fractionalVisibility; SampledSpectrum shadow_color; if (M.hasNonZero() && testVisibility(surfPt, lpResult.surfPt, &shadowRayDir, &squaredDistance, &fractionalVisibility, &shadow_color)) { Vector3D shadowRayDir_l = lpResult.surfPt.toLocal(-shadowRayDir); Vector3D shadowRayDir_sn = surfPt.toLocal(shadowRayDir); SampledSpectrum Le = M * ledf.evaluate(EDFQuery(), shadowRayDir_l) *shadow_color; float lightPDF = lightProb * lpResult.areaPDF; SampledSpectrum fs = bsdf.evaluate(fsQuery, shadowRayDir_sn); float cosLight = lpResult.surfPt.calcCosTerm(-shadowRayDir); float bsdfPDF = bsdf.evaluatePDF(fsQuery, shadowRayDir_sn) * cosLight / squaredDistance; float MISWeight = 1.0f; if (!lpResult.posType.isDelta() && !std::isinf(lightPDF)) MISWeight = (lightPDF * lightPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF); float G = fractionalVisibility * absDot(shadowRayDir_sn, geomNormalLocal) * cosLight / squaredDistance; float scalarCoeff = G * MISWeight / lightPDF; // Putting this directly into the contribution expression gives odd results, possibly due to a CUDA bug. sm_payload.contribution += sm_payload.alpha * Le * fs * scalarCoeff; CLAMPINTENSITY(sm_payload.contribution, 100.f); } } BSDFSample sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), rng.getFloat0cTo1o()); BSDFQueryResult fsResult; SampledSpectrum fs = bsdf.sample(fsQuery, sample, &fsResult); if (fs == SampledSpectrum::Zero() || fsResult.dirPDF == 0.0f) return; if (fsResult.sampledType.isDispersive() && !wls.singleIsSelected()) { fsResult.dirPDF /= SampledSpectrum::NumComponents(); wls.setSingleIsSelected(); } float cosFactor = dot(fsResult.dirLocal, geomNormalLocal); sm_payload.alpha *= fs * (std::fabs(cosFactor) / fsResult.dirPDF); // CLAMPINTENSITY(sm_payload.alpha); Vector3D dirIn = surfPt.fromLocal(fsResult.dirLocal); sm_payload.origin = offsetRayOrigin(surfPt.position, cosFactor > 0.0f ?
surfPt.geometricNormal : -surfPt.geometricNormal); sm_payload.direction = dirIn; sm_payload.prevDirPDF = fsResult.dirPDF; sm_payload.prevSampledType = fsResult.sampledType; sm_payload.terminate = false; } // JP: Ideally the environment light would also be handled uniformly in the Closest Hit Program, using an Intersection/Bounding Box Program for an infinitely large sphere. // However, OptiX's BVH builder is LBVH-based, so generating an infinite AABB is risky. // The environment light is therefore handled in the Miss Program instead. RT_PROGRAM void pathTracingMiss() { // first hit is the background, set alpha to 0 if (sm_payload.contribution.a == -1.0f) { // sm_payload.albedo = spEmittance; sm_payload.contribution.a = 0.f; } if (pv_envLightDescriptor.importance == 0) return; Vector3D direction = asVector3D(sm_ray.direction); float phi, theta; direction.toPolarYUp(&theta, &phi); float sinPhi, cosPhi; VLR::sincos(phi, &sinPhi, &cosPhi); Vector3D texCoord0Dir = normalize(Vector3D(-cosPhi, 0.0f, -sinPhi)); ReferenceFrame shadingFrame; shadingFrame.x = texCoord0Dir; shadingFrame.z = -direction; shadingFrame.y = cross(shadingFrame.z, shadingFrame.x); SurfacePoint surfPt; surfPt.position = Point3D(direction.x, direction.y, direction.z); surfPt.shadingFrame = shadingFrame; surfPt.isPoint = false; surfPt.atInfinity = true; surfPt.geometricNormal = -direction; surfPt.u = phi; surfPt.v = theta; phi += pv_envLightDescriptor.body.asInfSphere.rotationPhi; phi = phi - std::floor(phi / (2 * M_PIf)) * 2 * M_PIf; surfPt.texCoord = TexCoord2D(phi / (2 * M_PIf), theta / M_PIf); float hypAreaPDF = evaluateEnvironmentAreaPDF(phi, theta); const SurfaceMaterialDescriptor matDesc = pv_materialDescriptorBuffer[pv_envLightDescriptor.materialIndex]; EDF edf(matDesc, surfPt, sm_payload.wls); Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-asVector3D(sm_ray.direction)); // implicit light sampling SampledSpectrum spEmittance = edf.evaluateEmittance(); if (spEmittance.hasNonZero()) { SampledSpectrum Le = spEmittance * edf.evaluate(EDFQuery(), dirOutLocal); float MISWeight = 1.0f; if (!sm_payload.prevSampledType.isDelta() && sm_ray.ray_type != RayType::Primary) { float bsdfPDF = sm_payload.prevDirPDF; float dist2 = surfPt.calcSquaredDistance(asPoint3D(sm_ray.origin)); float lightPDF = pv_envLightDescriptor.importance / getSumLightImportances() * hypAreaPDF * dist2 / std::fabs(dirOutLocal.z); MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF); } sm_payload.contribution += sm_payload.alpha * Le * MISWeight; CLAMPINTENSITY(sm_payload.contribution, 100.f); } } // Common Ray Generation Program for All Camera Types RT_PROGRAM void pathTracing() { KernelRNG rng = pv_rngBuffer[sm_launchIndex]; optix::float2 p = make_float2(sm_launchIndex.x + rng.getFloat0cTo1o(), sm_launchIndex.y + rng.getFloat0cTo1o()); float selectWLPDF; WavelengthSamples wls = WavelengthSamples::createWithEqualOffsets(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), &selectWLPDF); LensPosSample We0Sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o()); LensPosQueryResult We0Result; SampledSpectrum We0 = pv_progSampleLensPosition(wls, We0Sample, &We0Result); IDFSample We1Sample(p.x / pv_imageSize.x, p.y / pv_imageSize.y); IDFQueryResult We1Result; SampledSpectrum We1 = pv_progSampleIDF(We0Result.surfPt, wls, We1Sample, &We1Result); Vector3D rayDir = We0Result.surfPt.fromLocal(We1Result.dirLocal); SampledSpectrum alpha = (We0 * We1) * (We0Result.surfPt.calcCosTerm(rayDir) / (We0Result.areaPDF * We1Result.dirPDF * selectWLPDF)); optix::Ray ray = optix::make_Ray(asOptiXType(We0Result.surfPt.position), asOptiXType(rayDir), RayType::Primary, 0.0f, FLT_MAX); Payload payload; payload.maxLengthTerminate =
false; payload.rng = rng; payload.initImportance = alpha.importance(wls.selectedLambdaIndex()); payload.wls = wls; payload.alpha = alpha; payload.contribution = SampledSpectrum::Zero(); payload.contribution.a = -1.0f; //payload.normal = SampledSpectrum(0.0, 1.0, 0.0); //payload.albedo = SampledSpectrum(-1.f, -1.f, -1.f); const uint32_t MaxPathLength = 25; uint32_t pathLength = 0; while (true) { payload.terminate = true; ++pathLength; if (pathLength >= MaxPathLength) payload.maxLengthTerminate = true; rtTrace(pv_topGroup, ray, payload); if (payload.terminate) break; VLRAssert(pathLength < MaxPathLength, "Path should be terminated... Something went wrong..."); ray = optix::make_Ray(asOptiXType(payload.origin), asOptiXType(payload.direction), RayType::Scattered, 0.0f, FLT_MAX); } pv_rngBuffer[sm_launchIndex] = payload.rng; if (!payload.contribution.allFinite()) { // vlrprintf("Pass %u, (%u, %u): Not a finite value.\n", pv_numAccumFrames, sm_launchIndex.x, sm_launchIndex.y); return; } if (pv_numAccumFrames == 1) { pv_outputBuffer[sm_launchIndex].reset(); // pv_outputNormalBuffer[sm_launchIndex] = payload.normal; // pv_outputAlbedoBuffer[sm_launchIndex] = payload.albedo; } //CLAMPINTENSITY(payload.contribution); pv_outputBuffer[sm_launchIndex].add(wls, payload.contribution); } // Exception Program RT_PROGRAM void exception() { //uint32_t code = rtGetExceptionCode(); rtPrintExceptionDetails(); } }
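Two estimator-weighting ideas recur throughout pathTracingIteration above: the power heuristic with beta = 2 for multiple importance sampling, and throughput-based Russian roulette. A compact sketch of both follows, with illustrative function names rather than VLR API calls:

// Weight for a sample drawn from strategy A when strategy B could also have produced it,
// i.e. the (a*a)/(a*a + b*b) factor used for MISWeight in the programs above.
__device__ inline float powerHeuristic(float pdfA, float pdfB)
{
    float a2 = pdfA * pdfA;
    float b2 = pdfB * pdfB;
    return a2 / (a2 + b2);
}

// Throughput-based Russian roulette: continue with probability proportional to the current
// path importance, and return the factor the surviving path must be scaled by to stay unbiased.
__device__ inline bool russianRouletteContinue(float throughputImportance,
                                               float initImportance,
                                               float u,          // uniform random number in [0, 1)
                                               float *invProb)   // out: 1 / continueProb
{
    float continueProb = fminf(throughputImportance / initImportance, 1.0f);
    if (u >= continueProb)
        return false;                  // terminate the path
    *invProb = 1.0f / continueProb;    // caller divides the throughput by continueProb
    return true;
}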
e66b9e6d0dcfd3bd700e4abda0771589472b43ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ausmPlus.h" #include <stdio.h> __global__ void diffusiveFlux(cell *domain,double *R, double *gammma, double *mu,double wall_temp,double *k) { int x=blockIdx.x; int y=threadIdx.x; int note=-10; int faces=(int)domain[x].face[y]-1; int ourFlag=(int)domain[x].flag; double delu_delx=0.0,delv_delx=0.0,delu_dely=0.0,delv_dely=0.0; if(ourFlag==0 || ourFlag==4) { double x_cord[]={0,0},y_cord[]={0,0}; if(faces<0 || faces>42860) { note=y; } int i1,i2; if(ourFlag==4 && y==note) { i1=note; i2=(note+1)%4; x_cord[1]=0.5*(domain[x].nodes[i1][0]+domain[x].nodes[i2][0]); y_cord[1]=0.5*(domain[x].nodes[i1][1]+domain[x].nodes[i2][1]); } for (int i = 0; i < 4; ++i) { if(ourFlag!=4 || (ourFlag==4 && y!=note)) { //x_cordinate of the elements x_cord[0]+=0.25*(domain[x].nodes[i][0]); x_cord[1]+=0.25*(domain[faces].nodes[i][0]); //Y coordinate of the elements y_cord[0]+=0.25*(domain[x].nodes[i][1]); y_cord[1]+=0.25*(domain[faces].nodes[i][1]); } else { //x_cordinate of the elements x_cord[0]+=0.25*(domain[x].nodes[i][0]); //Y coordinate of the elements y_cord[0]+=0.25*(domain[x].nodes[i][1]); } } if(abs(x_cord[1]-x_cord[0])<=0.001) { delu_delx=0.0; delv_delx=0.0; } else { delu_delx=(domain[x].temp_var[y][1]/domain[x].temp_var[y][0]-domain[x].stateVar[1]/domain[x].stateVar[0])/(x_cord[1]-x_cord[0]); delv_delx=(domain[x].temp_var[y][2]/domain[x].temp_var[y][0]-domain[x].stateVar[2]/domain[x].stateVar[0])/(x_cord[1]-x_cord[0]); } if(abs(y_cord[1]-y_cord[0])<=0.001) { delu_dely=0.0; delv_dely=0.0; } else { delu_dely=(domain[x].temp_var[y][1]/domain[x].temp_var[y][0]-domain[x].stateVar[1]/domain[x].stateVar[0])/(y_cord[1]-y_cord[0]); delv_dely=(domain[x].temp_var[y][2]/domain[x].temp_var[y][0]-domain[x].stateVar[2]/domain[x].stateVar[0])/(y_cord[1]-y_cord[0]); } double tau_xx=2*mu[0]*(delu_delx-1/2*(delu_delx+delv_dely)); double tau_yy=2*mu[0]*(delv_dely-1/2*(delu_delx+delv_dely)); double tau_xy=mu[0]*(delu_dely+delv_delx); double temp[2]; temp[0]=(gammma[0]-1)/R[0]*(domain[x].stateVar[3]-0.5*(pow(domain[x].stateVar[1],2)+pow(domain[x].stateVar[2],2))/domain[x].stateVar[0])/domain[x].stateVar[0]; if(ourFlag!=4 || (ourFlag==4 && y!=note)) temp[1]=(gammma[0]-1)/R[0]*(domain[x].temp_var[y][3]-0.5*(pow(domain[x].temp_var[y][1],2)\ +pow(domain[x].temp_var[y][2],2))/domain[x].temp_var[y][0])/domain[x].temp_var[y][0]; else { temp[1]=wall_temp; } double delT_delx,delT_dely; if(abs(x_cord[1]-x_cord[0])<=0.001) delT_delx=0; else delT_delx=(temp[1]-temp[0])/(x_cord[1]-x_cord[0]); if(abs(y_cord[1]-y_cord[0])<=0.001) delT_dely=0; else delT_dely=(temp[1]-temp[0])/(y_cord[1]-y_cord[0]); double thetaX=domain[x].stateVar[1]/domain[x].stateVar[0]*tau_xx+domain[x].stateVar[2]/domain[x].stateVar[0]*tau_xy+k[0]*delT_delx; double thetaY=domain[x].stateVar[1]/domain[x].stateVar[0]*tau_xy+domain[x].stateVar[2]/domain[x].stateVar[0]*tau_yy+k[0]*delT_dely; domain[x].diffflux[y][0]=0; domain[x].diffflux[y][1]=(tau_xx*domain[x].norms[y][0]+tau_xy*domain[x].norms[y][1])\ *sqrt(pow(domain[x].nodes[y][0]-domain[x].nodes[(y+1)%4][0],2)+pow(domain[x].nodes[y][1]-domain[x].nodes[(y+1)%4][1],2)); domain[x].diffflux[y][2]=(tau_xy*domain[x].norms[y][0]+tau_yy*domain[x].norms[y][1])\ *sqrt(pow(domain[x].nodes[y][0]-domain[x].nodes[(y+1)%4][0],2)+pow(domain[x].nodes[y][1]-domain[x].nodes[(y+1)%4][1],2)); domain[x].diffflux[y][3]=(thetaX*domain[x].norms[y][0]+thetaY*domain[x].norms[y][1])\ 
*sqrt(pow(domain[x].nodes[y][0]-domain[x].nodes[(y+1)%4][0],2)+pow(domain[x].nodes[y][1]-domain[x].nodes[(y+1)%4][1],2)); } }
e66b9e6d0dcfd3bd700e4abda0771589472b43ac.cu
#include "ausmPlus.h" #include <stdio.h> __global__ void diffusiveFlux(cell *domain,double *R, double *gammma, double *mu,double wall_temp,double *k) { int x=blockIdx.x; int y=threadIdx.x; int note=-10; int faces=(int)domain[x].face[y]-1; int ourFlag=(int)domain[x].flag; double delu_delx=0.0,delv_delx=0.0,delu_dely=0.0,delv_dely=0.0; if(ourFlag==0 || ourFlag==4) { double x_cord[]={0,0},y_cord[]={0,0}; if(faces<0 || faces>42860) { note=y; } int i1,i2; if(ourFlag==4 && y==note) { i1=note; i2=(note+1)%4; x_cord[1]=0.5*(domain[x].nodes[i1][0]+domain[x].nodes[i2][0]); y_cord[1]=0.5*(domain[x].nodes[i1][1]+domain[x].nodes[i2][1]); } for (int i = 0; i < 4; ++i) { if(ourFlag!=4 || (ourFlag==4 && y!=note)) { //x_cordinate of the elements x_cord[0]+=0.25*(domain[x].nodes[i][0]); x_cord[1]+=0.25*(domain[faces].nodes[i][0]); //Y coordinate of the elements y_cord[0]+=0.25*(domain[x].nodes[i][1]); y_cord[1]+=0.25*(domain[faces].nodes[i][1]); } else { //x_cordinate of the elements x_cord[0]+=0.25*(domain[x].nodes[i][0]); //Y coordinate of the elements y_cord[0]+=0.25*(domain[x].nodes[i][1]); } } if(abs(x_cord[1]-x_cord[0])<=0.001) { delu_delx=0.0; delv_delx=0.0; } else { delu_delx=(domain[x].temp_var[y][1]/domain[x].temp_var[y][0]-domain[x].stateVar[1]/domain[x].stateVar[0])/(x_cord[1]-x_cord[0]); delv_delx=(domain[x].temp_var[y][2]/domain[x].temp_var[y][0]-domain[x].stateVar[2]/domain[x].stateVar[0])/(x_cord[1]-x_cord[0]); } if(abs(y_cord[1]-y_cord[0])<=0.001) { delu_dely=0.0; delv_dely=0.0; } else { delu_dely=(domain[x].temp_var[y][1]/domain[x].temp_var[y][0]-domain[x].stateVar[1]/domain[x].stateVar[0])/(y_cord[1]-y_cord[0]); delv_dely=(domain[x].temp_var[y][2]/domain[x].temp_var[y][0]-domain[x].stateVar[2]/domain[x].stateVar[0])/(y_cord[1]-y_cord[0]); } double tau_xx=2*mu[0]*(delu_delx-1/2*(delu_delx+delv_dely)); double tau_yy=2*mu[0]*(delv_dely-1/2*(delu_delx+delv_dely)); double tau_xy=mu[0]*(delu_dely+delv_delx); double temp[2]; temp[0]=(gammma[0]-1)/R[0]*(domain[x].stateVar[3]-0.5*(pow(domain[x].stateVar[1],2)+pow(domain[x].stateVar[2],2))/domain[x].stateVar[0])/domain[x].stateVar[0]; if(ourFlag!=4 || (ourFlag==4 && y!=note)) temp[1]=(gammma[0]-1)/R[0]*(domain[x].temp_var[y][3]-0.5*(pow(domain[x].temp_var[y][1],2)\ +pow(domain[x].temp_var[y][2],2))/domain[x].temp_var[y][0])/domain[x].temp_var[y][0]; else { temp[1]=wall_temp; } double delT_delx,delT_dely; if(abs(x_cord[1]-x_cord[0])<=0.001) delT_delx=0; else delT_delx=(temp[1]-temp[0])/(x_cord[1]-x_cord[0]); if(abs(y_cord[1]-y_cord[0])<=0.001) delT_dely=0; else delT_dely=(temp[1]-temp[0])/(y_cord[1]-y_cord[0]); double thetaX=domain[x].stateVar[1]/domain[x].stateVar[0]*tau_xx+domain[x].stateVar[2]/domain[x].stateVar[0]*tau_xy+k[0]*delT_delx; double thetaY=domain[x].stateVar[1]/domain[x].stateVar[0]*tau_xy+domain[x].stateVar[2]/domain[x].stateVar[0]*tau_yy+k[0]*delT_dely; domain[x].diffflux[y][0]=0; domain[x].diffflux[y][1]=(tau_xx*domain[x].norms[y][0]+tau_xy*domain[x].norms[y][1])\ *sqrt(pow(domain[x].nodes[y][0]-domain[x].nodes[(y+1)%4][0],2)+pow(domain[x].nodes[y][1]-domain[x].nodes[(y+1)%4][1],2)); domain[x].diffflux[y][2]=(tau_xy*domain[x].norms[y][0]+tau_yy*domain[x].norms[y][1])\ *sqrt(pow(domain[x].nodes[y][0]-domain[x].nodes[(y+1)%4][0],2)+pow(domain[x].nodes[y][1]-domain[x].nodes[(y+1)%4][1],2)); domain[x].diffflux[y][3]=(thetaX*domain[x].norms[y][0]+thetaY*domain[x].norms[y][1])\ *sqrt(pow(domain[x].nodes[y][0]-domain[x].nodes[(y+1)%4][0],2)+pow(domain[x].nodes[y][1]-domain[x].nodes[(y+1)%4][1],2)); } }
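One detail worth noting in diffusiveFlux above is that 1/2 is an integer division in C/CUDA and evaluates to 0, so the divergence contribution to tau_xx and tau_yy vanishes as written. The following standalone sketch spells out the 2-D viscous stress and heat-flux terms with floating-point constants; viscousFlux2D is an illustrative name, and the 0.5 coefficient mirrors what the original expression appears to intend:

__device__ inline void viscousFlux2D(double mu, double k,
                                     double du_dx, double du_dy,
                                     double dv_dx, double dv_dy,
                                     double dT_dx, double dT_dy,
                                     double u, double v,
                                     double *tau_xx, double *tau_yy, double *tau_xy,
                                     double *thetaX, double *thetaY)
{
    double divV = du_dx + dv_dy;                   // velocity divergence
    *tau_xx = 2.0 * mu * (du_dx - 0.5 * divV);     // 0.5, not the integer expression 1/2
    *tau_yy = 2.0 * mu * (dv_dy - 0.5 * divV);
    *tau_xy = mu * (du_dy + dv_dx);
    *thetaX = u * (*tau_xx) + v * (*tau_xy) + k * dT_dx;   // stress work plus heat conduction
    *thetaY = u * (*tau_xy) + v * (*tau_yy) + k * dT_dy;
}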
7afe331841278716b9eda444404487e7a68cf849.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <cmath>
#include <hip/hip_runtime_api.h>

#define SQ(x) ((x) * (x))

static const float A = -4.0, B = 4.0;   // integration limits
static const int N = 1 << 22;           // number of subintervals = 2^22
static const float H = (B - A) / N;     // width of each integration subinterval
static const float PI(M_PI);            // pi in single precision

__device__ float h(float x) {
    return .5f + 1.5f / (1.0f + 50.0f * SQ(x));
}

float host_h(float x) {
    return .5f + 1.5f / (1.0f + 50.0f * SQ(x));
}

__device__ float f(float x) {
    int i;
    float sum = 0.0f, x0;
    for (i = 0; i < 10; ++i){
        x0 = -3.3f + i * 0.7f;
        sum += h(x - x0);
    }
    return sum/10.0f;
}

float host_f(float x) {
    int i;
    float sum = 0.0f, x0;
    for (i = 0; i < 10; ++i){
        x0 = -3.3f + i * 0.7f;
        sum += host_h(x - x0);
    }
    return sum/10.0f;
}

__device__ float g(float x) {
    float c = cosf(2.0f * PI * f(x) * x);
    return expf(-x/16.0f) * SQ(c);
}

float host_g(float x) {
    float c = cosf(2.0f * PI * host_f(x) * x);
    return expf(-x/16.0f) * SQ(c);
}

__global__ void integrate_blocks(float subtotals[]) {
    // Initialize the __shared__ variable
    extern __shared__ float partialValues[];

    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int tx = threadIdx.x;

    // Each thread, according to its id, evaluates g(x)
    float x = 0;

    // Do the calculation
    x = A + i * (H / 2);
    partialValues[tx] += (i % 2 == 0 ? 4 : 2) * g(x);

    // Synchronize the threads once this is done
    __syncthreads();

    // Do a reduction sum with the values of the subtotals[] array
    for (int offset = blockDim.x/2; offset > 0; offset >>=1){
        if (tx < offset) {
            // Add in the partial sum at the given offset
            partialValues[tx] += partialValues[tx+offset];
        }
        __syncthreads();
    }

    // Write out the result from the first element of our array
    if(tx == 0) {
        // Results are stored per block, not per thread
        subtotals[blockIdx.x] = partialValues[0];
    }
}

int main(int argc, char *argv[]) {
    // The program takes the number of threads per block as a parameter.
    // Remember that this number must be a multiple of 32 (the warp size)
    // and can be at most 512 (hardware limitation).
    if (argc < 2) {
        std::cerr << "Usage: " << argv[0] << " threads_per_block" << std::endl;
        std::exit(1);
    }
    int block_size = std::atoi(argv[1]);

    // With N subintervals, the function must be evaluated at 2N + 1 points.
    // To parallelize, it is easier to evaluate 2N points and add the last one at the end.
    // We therefore need 2N threads.
    int nr_blocks = 2 * N / block_size;
    size_t sharedMem = 512;

    // Allocate arrays in RAM and on the GPU to store the results.
    float *subtotals_h, *subtotals_d;
    subtotals_h = new float[nr_blocks];
    hipMalloc((void **) &subtotals_d, sizeof(float) * nr_blocks);

    hipLaunchKernelGGL(( integrate_blocks), dim3(nr_blocks), dim3(block_size), sharedMem, 0, subtotals_d);

    // In part (a) of the assignment,
    // the global reduction is done on the CPU.
    hipMemcpy(subtotals_h, subtotals_d, sizeof(float) * nr_blocks, hipMemcpyDeviceToHost);
    float sum = 0.0;
    # pragma omp parallel for reduction(+: sum)
    for (int b = 0; b < nr_blocks; ++b) {
        sum += subtotals_h[b];
    }
    sum += host_g(B) + host_g(A);
    float integral = sum * H / 6.0f;
    std::cout << "Integral: " << std::setprecision(5) << integral << std::endl;
    sum = 0.0;
    hipFree(subtotals_d);
    std::free(subtotals_h);
}
7afe331841278716b9eda444404487e7a68cf849.cu
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <cmath>
#include <cuda_runtime_api.h>

#define SQ(x) ((x) * (x))

static const float A = -4.0, B = 4.0;   // integration limits
static const int N = 1 << 22;           // number of subintervals = 2^22
static const float H = (B - A) / N;     // width of each integration subinterval
static const float PI(M_PI);            // pi in single precision

__device__ float h(float x) {
    return .5f + 1.5f / (1.0f + 50.0f * SQ(x));
}

float host_h(float x) {
    return .5f + 1.5f / (1.0f + 50.0f * SQ(x));
}

__device__ float f(float x) {
    int i;
    float sum = 0.0f, x0;
    for (i = 0; i < 10; ++i){
        x0 = -3.3f + i * 0.7f;
        sum += h(x - x0);
    }
    return sum/10.0f;
}

float host_f(float x) {
    int i;
    float sum = 0.0f, x0;
    for (i = 0; i < 10; ++i){
        x0 = -3.3f + i * 0.7f;
        sum += host_h(x - x0);
    }
    return sum/10.0f;
}

__device__ float g(float x) {
    float c = cosf(2.0f * PI * f(x) * x);
    return expf(-x/16.0f) * SQ(c);
}

float host_g(float x) {
    float c = cosf(2.0f * PI * host_f(x) * x);
    return expf(-x/16.0f) * SQ(c);
}

__global__ void integrate_blocks(float subtotals[]) {
    // Initialize the __shared__ variable
    extern __shared__ float partialValues[];

    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int tx = threadIdx.x;

    // Each thread, according to its id, evaluates g(x)
    float x = 0;

    // Do the calculation
    x = A + i * (H / 2);
    partialValues[tx] += (i % 2 == 0 ? 4 : 2) * g(x);

    // Synchronize the threads once this is done
    __syncthreads();

    // Do a reduction sum with the values of the subtotals[] array
    for (int offset = blockDim.x/2; offset > 0; offset >>=1){
        if (tx < offset) {
            // Add in the partial sum at the given offset
            partialValues[tx] += partialValues[tx+offset];
        }
        __syncthreads();
    }

    // Write out the result from the first element of our array
    if(tx == 0) {
        // Results are stored per block, not per thread
        subtotals[blockIdx.x] = partialValues[0];
    }
}

int main(int argc, char *argv[]) {
    // The program takes the number of threads per block as a parameter.
    // Remember that this number must be a multiple of 32 (the warp size)
    // and can be at most 512 (hardware limitation).
    if (argc < 2) {
        std::cerr << "Usage: " << argv[0] << " threads_per_block" << std::endl;
        std::exit(1);
    }
    int block_size = std::atoi(argv[1]);

    // With N subintervals, the function must be evaluated at 2N + 1 points.
    // To parallelize, it is easier to evaluate 2N points and add the last one at the end.
    // We therefore need 2N threads.
    int nr_blocks = 2 * N / block_size;
    size_t sharedMem = 512;

    // Allocate arrays in RAM and on the GPU to store the results.
    float *subtotals_h, *subtotals_d;
    subtotals_h = new float[nr_blocks];
    cudaMalloc((void **) &subtotals_d, sizeof(float) * nr_blocks);

    integrate_blocks<<<nr_blocks, block_size, sharedMem>>>(subtotals_d);

    // In part (a) of the assignment,
    // the global reduction is done on the CPU.
    cudaMemcpy(subtotals_h, subtotals_d, sizeof(float) * nr_blocks, cudaMemcpyDeviceToHost);
    float sum = 0.0;
    # pragma omp parallel for reduction(+: sum)
    for (int b = 0; b < nr_blocks; ++b) {
        sum += subtotals_h[b];
    }
    sum += host_g(B) + host_g(A);
    float integral = sum * H / 6.0f;
    std::cout << "Integral: " << std::setprecision(5) << integral << std::endl;
    sum = 0.0;
    cudaFree(subtotals_d);
    std::free(subtotals_h);
}
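The kernel and main above assemble a composite Simpson estimate: the block sums are scaled by H/6 and the endpoint values g(A) and g(B) are added once each. In the textbook rule, each subinterval [x_k, x_k + H] contributes (H/6)(g(x_k) + 4 g(x_k + H/2) + g(x_k + H)), i.e. weight 4 on the midpoints (odd sample indices) and 2 on the interior endpoints; the kernel above assigns 4 to even indices and 2 to odd ones, so a host-side reference such as the following is handy for checking which weighting is intended. simpsonHost is an assumed name; it reuses host_g from the file above:

// Host-side composite Simpson reference over n subintervals of [a, b].
float simpsonHost(float a, float b, int n)
{
    const float hh = (b - a) / n;
    float sum = host_g(a) + host_g(b);          // the two outer endpoints, weight 1
    for (int k = 0; k < n; ++k) {
        float x0 = a + k * hh;
        sum += 4.0f * host_g(x0 + 0.5f * hh);   // midpoint of the subinterval, weight 4
        if (k > 0)
            sum += 2.0f * host_g(x0);           // shared interior endpoint, weight 2
    }
    return sum * hh / 6.0f;
}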
5f9b770ae10eac2df997cfcc5a8661b76be20ed5.hip
// !!! This is a file automatically generated by hipify!!! /** * @file gr_scaling.cpp * @brief BSSN_GR scaling tests. * @version 0.1 * @date 2021-07-25 * * @copyright Copyright (c) 2021 * */ #include "gr.h" #include "grUtils.h" #include "mpi.h" #include "TreeNode.h" #include "mesh.h" #include <vector> #include <iostream> #include "rkBSSN.h" #include "octUtils.h" #include "meshUtils.h" #include "mathUtils.h" #include <fstream> #include <ctime> #include "bssnCtxGPU.cuh" int bssn_driver(MPI_Comm comm, unsigned int num_step,unsigned int warm_up, std::ostream& outfile,unsigned int ts_mode) { int rank, npes; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &npes); std::vector<ot::TreeNode> tmpNodes; std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){bssn::punctureData(x,y,z,var);}; const unsigned int interpVars=bssn::BSSN_NUM_VARS; unsigned int varIndex[interpVars]; for(unsigned int i=0;i<bssn::BSSN_NUM_VARS;i++) varIndex[i]=i; if(false && bssn::BSSN_ENABLE_BLOCK_ADAPTIVITY) { if(!rank) std::cout<<YLW<<"Using block adaptive mesh. AMR disabled "<<NRM<<std::endl; const Point pt_min(bssn::BSSN_BLK_MIN_X,bssn::BSSN_BLK_MIN_Y,bssn::BSSN_BLK_MIN_Z); const Point pt_max(bssn::BSSN_BLK_MAX_X,bssn::BSSN_BLK_MAX_Y,bssn::BSSN_BLK_MAX_Z); bssn::blockAdaptiveOctree(tmpNodes,pt_min,pt_max,m_uiMaxDepth-2,m_uiMaxDepth,comm); }else { if(!rank) std::cout<<YLW<<"Using function2Octree. AMR enabled "<<NRM<<std::endl; const unsigned int f2olmin = ::min(bssn::BSSN_BH1_MAX_LEV,bssn::BSSN_BH2_MAX_LEV); if(f2olmin < MAXDEAPTH_LEVEL_DIFF + 2) { if(!rank) std::cout<<"BH min level should be larger than "<<(MAXDEAPTH_LEVEL_DIFF+2)<<std::endl; MPI_Abort(comm,0); } function2Octree(f_init,bssn::BSSN_NUM_VARS,varIndex,interpVars,tmpNodes,(f2olmin-MAXDEAPTH_LEVEL_DIFF-2),bssn::BSSN_WAVELET_TOL,bssn::BSSN_ELE_ORDER,comm); } //std::vector<ot::TreeNode> f2Octants(tmpNodes); ot::Mesh * mesh = ot::createMesh(tmpNodes.data(),tmpNodes.size(),bssn::BSSN_ELE_ORDER,comm,1,ot::SM_TYPE::FDM,bssn::BSSN_DENDRO_GRAIN_SZ,bssn::BSSN_LOAD_IMB_TOL,bssn::BSSN_SPLIT_FIX); mesh->setDomainBounds(Point(bssn::BSSN_GRID_MIN_X,bssn::BSSN_GRID_MIN_Y,bssn::BSSN_GRID_MIN_Z), Point(bssn::BSSN_GRID_MAX_X, bssn::BSSN_GRID_MAX_Y,bssn::BSSN_GRID_MAX_Z)); unsigned int lmin, lmax; mesh->computeMinMaxLevel(lmin,lmax); tmpNodes.clear(); if(!rank) { std::cout<<"================= Grid Info (Before init grid converge):======================================================="<<std::endl; std::cout<<"lmin: "<<lmin<<" lmax:"<<lmax<<std::endl; std::cout<<"dx: "<<((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth))))<<std::endl; std::cout<<"dt: "<<bssn::BSSN_CFL_FACTOR*((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth))))<<std::endl; std::cout<<"ts mode: "<<ts_mode<<std::endl; std::cout<<"==============================================================================================================="<<std::endl; } bssn::BSSN_RK45_TIME_STEP_SIZE=bssn::BSSN_CFL_FACTOR*((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth)))); tmpNodes.clear(); // enable block adaptivity disable remesh. 
bssn::BSSN_ENABLE_BLOCK_ADAPTIVITY=1; ot::Mesh* pMesh = mesh; if(bssn::BSSN_RESTORE_SOLVER==0) pMesh = bssn::weakScalingReMesh(mesh,npes); if(ts_mode == 0) { }else if(ts_mode == 1) { bssn::BSSNCtxGPU * bssnCtx = new bssn::BSSNCtxGPU(pMesh); ts::ETS<DendroScalar,bssn::BSSNCtxGPU>* ets = new ts::ETS<DendroScalar,bssn::BSSNCtxGPU>(bssnCtx); ets->set_evolve_vars(bssnCtx->get_evolution_vars()); if((RKType)bssn::BSSN_RK_TYPE == RKType::RK3) ets->set_ets_coefficients(ts::ETSType::RK3); else if((RKType)bssn::BSSN_RK_TYPE == RKType::RK4) ets->set_ets_coefficients(ts::ETSType::RK4); else if((RKType)bssn::BSSN_RK_TYPE == RKType::RK45) ets->set_ets_coefficients(ts::ETSType::RK5); ets->init(); #if defined __PROFILE_CTX__ && defined __PROFILE_ETS__ std::ofstream outfile; char fname [256]; sprintf(fname, "bssnCtxGPU_WS_%d.txt",npes); if(!rank) { outfile.open(fname, std::ios_base::app); time_t now = time(0); // convert now to string form char* dt = ctime(&now); outfile <<"============================================================"<<std::endl; outfile << "Current time : "<<dt<<" --- "<<std::endl; outfile <<"============================================================"<<std::endl; } ets->init_pt(); bssnCtx->reset_pt(); ets->dump_pt(outfile); //bssnCtx->dump_pt(outfile); #endif hipStream_t s_gw; hipStreamCreate(&s_gw); ts::TSInfo ts_gw_output; ts::TSInfo ts_curr; bool is_gw_written=false; bool is_merge_executed =false; double t1 = MPI_Wtime(); while(ets->curr_time() < bssn::BSSN_RK_TIME_END) { const DendroIntL step = ets->curr_step(); const DendroScalar time = ets->curr_time(); bssn::BSSN_CURRENT_RK_COORD_TIME = time; bssn::BSSN_CURRENT_RK_STEP = step; // if(time < 200) // bssn::BSSN_REFINEMENT_MODE = RefinementMode::BH_LOC; // else // bssn::BSSN_REFINEMENT_MODE = RefinementMode::WAMR; const bool isActive = ets->is_active(); const unsigned int rank_global = ets->get_global_rank(); const bool is_merged = bssnCtx->is_bh_merged(0.1); if(is_merged && !is_merge_executed) { bssn::BSSN_REMESH_TEST_FREQ = 3 * bssn::BSSN_REMESH_TEST_FREQ_AFTER_MERGER; bssn::BSSN_MINDEPTH=5; bssn::BSSN_GW_EXTRACT_FREQ = bssn::BSSN_GW_EXTRACT_FREQ_AFTER_MERGER; bssn::BSSN_REFINEMENT_MODE = RefinementMode::WAMR; }/*else { //bssn::BSSN_REFINEMENT_MODE = RefinementMode::BH_LOC; }*/ /*if((step % bssn::BSSN_GW_EXTRACT_FREQ) == 0 ) { if(!rank_global) std::cout<<"[ETS] : Executing step : "<<ets->curr_step()<<"\tcurrent time :"<<ets->curr_time()<<"\t dt:"<<ets->ts_size()<<"\t"<<std::endl; // 03/23/22 : this works only because we use defult stream to for the evolution. Default stream is special and synchronous with all other streams. bssnCtx->device_to_host_async(s_gw); ts_gw_output=bssnCtx->get_ts_info(); is_gw_written=false; if( (step % bssn::BSSN_REMESH_TEST_FREQ) == 0 ) { hipStreamSynchronize(s_gw); bool isRemesh = bssnCtx->is_remesh(); if(isRemesh) { if(!rank_global) std::cout<<"[ETS] : Remesh is triggered. 
\n"; bssnCtx->remesh_and_gridtransfer(bssn::BSSN_DENDRO_GRAIN_SZ, bssn::BSSN_LOAD_IMB_TOL,bssn::BSSN_SPLIT_FIX); bssn::deallocate_bssn_deriv_workspace(); bssn::allocate_bssn_deriv_workspace(bssnCtx->get_mesh(),1); ets->sync_with_mesh(); // correct timestep size ot::Mesh* pmesh = bssnCtx->get_mesh(); unsigned int lmin, lmax; pmesh->computeMinMaxLevel(lmin,lmax); if(!pmesh->getMPIRank()) printf("post merger grid level = (%d, %d)\n",lmin,lmax); bssn::BSSN_RK45_TIME_STEP_SIZE=bssn::BSSN_CFL_FACTOR*((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth)))); ts::TSInfo ts_in = bssnCtx->get_ts_info(); ts_in._m_uiTh = bssn::BSSN_RK45_TIME_STEP_SIZE; bssnCtx->set_ts_info(ts_in); } } } if((step % bssn::BSSN_GW_EXTRACT_FREQ) == (bssn::BSSN_GW_EXTRACT_FREQ-1)) hipStreamSynchronize(s_gw); if((!is_gw_written) && (hipStreamQuery(s_gw) == hipSuccess)) { ts_curr = bssnCtx->get_ts_info(); bssnCtx->set_ts_info(ts_gw_output); bssnCtx->terminal_output(); bssnCtx->write_vtu(); bssnCtx->evolve_bh_loc(bssnCtx->get_evolution_vars_cpu(),ets->ts_size()*bssn::BSSN_GW_EXTRACT_FREQ); if( (step % bssn::BSSN_CHECKPT_FREQ) == 0 ) bssnCtx->write_checkpt(); bssnCtx->set_ts_info(ts_curr); is_gw_written=true; }*/ ets->evolve(); } #if defined __PROFILE_CTX__ && defined __PROFILE_ETS__ ets->dump_pt(outfile); //bssnCtx->dump_pt(outfile); #endif double t2 = MPI_Wtime()-t1; double t2_g; par::Mpi_Allreduce(&t2,&t2_g,1,MPI_MAX,ets->get_global_comm()); if(!(ets->get_global_rank())) std::cout<<" ETS time (max) : "<<t2_g<<std::endl; if(bssn::BSSN_RESTORE_SOLVER==0) { if(pMesh == mesh) delete mesh; else { delete mesh; delete pMesh; } }else { delete pMesh; } delete bssnCtx; delete ets; }else { if(!rank) RAISE_ERROR("invalid ts mode : "<<ts_mode<<"specifed"); MPI_Abort(comm,0); } return 0; } int main (int argc, char** argv) { // 0- NUTS 1-UTS unsigned int ts_mode=0; if(argc<2) { std::cout<<"Usage: "<<argv[0]<<"paramFile TSMode(0){0-Spatially Adaptive Time Stepping(SATS, "<<GRN<<"default"<<NRM<<") , 1- Uniform Time Stepping. 
}"<<std::endl; return 0; } if(argc>2) ts_mode = std::atoi(argv[2]); MPI_Init(&argc,&argv); MPI_Comm comm=MPI_COMM_WORLD; int rank,npes; MPI_Comm_rank(comm,&rank); MPI_Comm_size(comm,&npes); // Print out CMAKE options if (!rank) { #ifdef BSSN_COMPUTE_CONSTRAINTS std::cout<<GRN<<" Compiled with BSSN_COMPUTE_CONSTRAINTS"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_COMPUTE_CONSTRAINTS"<<NRM<<std::endl; #endif #ifdef BSSN_ENABLE_VTU_CONSTRAINT_OUTPUT std::cout<<GRN<<" Compiled with BSSN_ENABLE_VTU_CONSTRAINT_OUTPUT"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_ENABLE_VTU_CONSTRAINT_OUTPUT"<<NRM<<std::endl; #endif #ifdef BSSN_ENABLE_VTU_OUTPUT std::cout<<GRN<<" Compiled with BSSN_ENABLE_VTU_OUTPUT"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_ENABLE_VTU_OUTPUT"<<NRM<<std::endl; #endif #ifdef BSSN_ETA_FUNCTION std::cout<<GRN<<" Compiled with BSSN_ETA_FUNCTION"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_ETA_FUNCTION"<<NRM<<std::endl; #endif #ifdef BSSN_EXTRACT_BH_LOCATIONS std::cout<<GRN<<" Compiled with BSSN_EXTRACT_BH_LOCATIONS"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_EXTRACT_BH_LOCATIONS"<<NRM<<std::endl; #endif #ifdef BSSN_EXTRACT_GRAVITATIONAL_WAVES std::cout<<GRN<<" Compiled with BSSN_EXTRACT_GRAVITATIONAL_WAVES"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_EXTRACT_GRAVITATIONAL_WAVES"<<NRM<<std::endl; #endif #ifdef BSSN_EXTRACT_GRAVITATIONAL_WAVES std::cout<<GRN<<" Compiled with BSSN_EXTRACT_GRAVITATIONAL_WAVES"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_EXTRACT_GRAVITATIONAL_WAVES"<<NRM<<std::endl; #endif #ifdef BSSN_GAUGE_ROCHESTER std::cout<<GRN<<" Compiled with BSSN_GAUGE_ROCHESTER"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_GAUGE_ROCHESTER"<<NRM<<std::endl; #endif #ifdef BSSN_KERR_SCHILD_TEST std::cout<<GRN<<" Compiled with BSSN_KERR_SCHILD_TEST"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_KERR_SCHILD_TEST"<<NRM<<std::endl; #endif #ifdef BSSN_REFINE_BASE_EH std::cout<<GRN<<" Compiled with BSSN_REFINE_BASE_EH"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_REFINE_BASE_EH"<<NRM<<std::endl; #endif #ifdef USE_FD_INTERP_FOR_UNZIP std::cout<<GRN<<" Compiled with USE_FD_INTERP_FOR_UNZIP"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without USE_FD_INTERP_FOR_UNZIP"<<NRM<<std::endl; #endif } //1 . read the parameter file. if(!rank) std::cout<<" reading parameter file :"<<argv[1]<<std::endl; bssn::readParamFile(argv[1],comm); int root = ::min(1,npes-1); bssn::dumpParamFile(std::cout,root,comm); _InitializeHcurve(bssn::BSSN_DIM); m_uiMaxDepth=bssn::BSSN_MAXDEPTH; if(bssn::BSSN_NUM_VARS%bssn::BSSN_ASYNC_COMM_K!=0) { if(!rank) std::cout<<"[overlap communication error]: total BSSN_NUM_VARS: "<<bssn::BSSN_NUM_VARS<<" is not divisable by BSSN_ASYNC_COMM_K: "<<bssn::BSSN_ASYNC_COMM_K<<std::endl; MPI_Abort(comm,0); } if(bssn::BSSN_GW_EXTRACT_FREQ> bssn::BSSN_IO_OUTPUT_FREQ) { if(!rank) std::cout<<" BSSN_GW_EXTRACT_FREQ should be less BSSN_IO_OUTPUT_FREQ "<<std::endl; MPI_Abort(comm,0); } //2. generate the initial grid. 
std::vector<ot::TreeNode> tmpNodes; std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){bssn::punctureData(x,y,z,var);}; std::function<double(double,double,double)> f_init_alpha=[](double x,double y,double z){ double var[24]; bssn::punctureData(x,y,z,var); return var[0];}; //std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){bssn::KerrSchildData(x,y,z,var);}; const unsigned int interpVars=bssn::BSSN_NUM_VARS; unsigned int varIndex[interpVars]; for(unsigned int i=0;i<bssn::BSSN_NUM_VARS;i++) varIndex[i]=i; /*varIndex[0]=bssn::VAR::U_ALPHA; varIndex[1]=bssn::VAR::U_CHI;*/ DendroIntL localSz,globalSz; double t_stat; double t_stat_g[3]; const unsigned int NUM_WARM_UP=2; const unsigned int NUM_STEPS =1; std::ofstream outfile; char fname [256]; sprintf(fname, "bssnCtx_WS_%d.txt",npes); if(!rank) { outfile.open(fname, std::ios_base::app); time_t now = time(0); // convert now to string form char* dt = ctime(&now); outfile <<"============================================================"<<std::endl; outfile << "Current time : "<<dt<<" --- "<<std::endl; outfile <<"============================================================"<<std::endl; } bssn_driver(comm,NUM_STEPS,NUM_WARM_UP,outfile,1); if(!rank) outfile.close(); #ifdef RUN_WEAK_SCALING if(!rank) std::cout<<"======================================================================"<<std::endl; if(!rank) std::cout<<" Weak Scaling Run Begin. "<<std::endl; if(!rank) std::cout<<"======================================================================"<<std::endl; int proc_group = 0; int min_np = 2; for (int i = npes; rank < i && i >= min_np; i = i >> 1) proc_group++; MPI_Comm comm_ws; MPI_Comm_split(comm, proc_group, rank, &comm_ws); MPI_Comm_rank(comm_ws, &rank); MPI_Comm_size(comm_ws, &npes); if(!rank) outfile.open(fname, std::ios_base::app); MPI_Barrier(comm_ws); bssn_driver(comm_ws,NUM_STEPS,NUM_WARM_UP,outfile,1); MPI_Barrier(comm_ws); if(!rank) outfile.close(); MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &npes); MPI_Barrier(comm); if(!rank) std::cout<<"======================================================================"<<std::endl; if(!rank) std::cout<<" Weak Scaling Run Complete. "<<std::endl; if(!rank) std::cout<<"======================================================================"<<std::endl; #endif MPI_Finalize(); return 0; }
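// --------------------------------------------------------------------------
// Hedged sketch (editorial, not part of the original file): the RUN_WEAK_SCALING
// block in main() colours each rank by how many times the total rank count can
// be halved while still containing it, then splits the communicator on that
// colour. The upper half of the ranks thus forms a sub-communicator of size
// npes/2, the next quarter one of size npes/4, and so on, and every group runs
// the same driver concurrently. Only standard MPI calls are used below;
// run_driver() is a hypothetical stand-in for bssn_driver().
#include <mpi.h>

static void weak_scaling_sweep(MPI_Comm comm, int min_np)
{
    int rank, npes;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &npes);

    // colour = number of halvings of npes that still contain this rank
    int proc_group = 0;
    for (int i = npes; rank < i && i >= min_np; i >>= 1)
        proc_group++;

    MPI_Comm comm_ws;
    MPI_Comm_split(comm, proc_group, rank, &comm_ws);

    // run_driver(comm_ws);   // hypothetical: same per-rank work in every group

    MPI_Comm_free(&comm_ws);
}
// --------------------------------------------------------------------------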
5f9b770ae10eac2df997cfcc5a8661b76be20ed5.cu
/** * @file gr_scaling.cpp * @brief BSSN_GR scaling tests. * @version 0.1 * @date 2021-07-25 * * @copyright Copyright (c) 2021 * */ #include "gr.h" #include "grUtils.h" #include "mpi.h" #include "TreeNode.h" #include "mesh.h" #include <vector> #include <iostream> #include "rkBSSN.h" #include "octUtils.h" #include "meshUtils.h" #include "mathUtils.h" #include <fstream> #include <ctime> #include "bssnCtxGPU.cuh" int bssn_driver(MPI_Comm comm, unsigned int num_step,unsigned int warm_up, std::ostream& outfile,unsigned int ts_mode) { int rank, npes; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &npes); std::vector<ot::TreeNode> tmpNodes; std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){bssn::punctureData(x,y,z,var);}; const unsigned int interpVars=bssn::BSSN_NUM_VARS; unsigned int varIndex[interpVars]; for(unsigned int i=0;i<bssn::BSSN_NUM_VARS;i++) varIndex[i]=i; if(false && bssn::BSSN_ENABLE_BLOCK_ADAPTIVITY) { if(!rank) std::cout<<YLW<<"Using block adaptive mesh. AMR disabled "<<NRM<<std::endl; const Point pt_min(bssn::BSSN_BLK_MIN_X,bssn::BSSN_BLK_MIN_Y,bssn::BSSN_BLK_MIN_Z); const Point pt_max(bssn::BSSN_BLK_MAX_X,bssn::BSSN_BLK_MAX_Y,bssn::BSSN_BLK_MAX_Z); bssn::blockAdaptiveOctree(tmpNodes,pt_min,pt_max,m_uiMaxDepth-2,m_uiMaxDepth,comm); }else { if(!rank) std::cout<<YLW<<"Using function2Octree. AMR enabled "<<NRM<<std::endl; const unsigned int f2olmin = std::min(bssn::BSSN_BH1_MAX_LEV,bssn::BSSN_BH2_MAX_LEV); if(f2olmin < MAXDEAPTH_LEVEL_DIFF + 2) { if(!rank) std::cout<<"BH min level should be larger than "<<(MAXDEAPTH_LEVEL_DIFF+2)<<std::endl; MPI_Abort(comm,0); } function2Octree(f_init,bssn::BSSN_NUM_VARS,varIndex,interpVars,tmpNodes,(f2olmin-MAXDEAPTH_LEVEL_DIFF-2),bssn::BSSN_WAVELET_TOL,bssn::BSSN_ELE_ORDER,comm); } //std::vector<ot::TreeNode> f2Octants(tmpNodes); ot::Mesh * mesh = ot::createMesh(tmpNodes.data(),tmpNodes.size(),bssn::BSSN_ELE_ORDER,comm,1,ot::SM_TYPE::FDM,bssn::BSSN_DENDRO_GRAIN_SZ,bssn::BSSN_LOAD_IMB_TOL,bssn::BSSN_SPLIT_FIX); mesh->setDomainBounds(Point(bssn::BSSN_GRID_MIN_X,bssn::BSSN_GRID_MIN_Y,bssn::BSSN_GRID_MIN_Z), Point(bssn::BSSN_GRID_MAX_X, bssn::BSSN_GRID_MAX_Y,bssn::BSSN_GRID_MAX_Z)); unsigned int lmin, lmax; mesh->computeMinMaxLevel(lmin,lmax); tmpNodes.clear(); if(!rank) { std::cout<<"================= Grid Info (Before init grid converge):======================================================="<<std::endl; std::cout<<"lmin: "<<lmin<<" lmax:"<<lmax<<std::endl; std::cout<<"dx: "<<((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth))))<<std::endl; std::cout<<"dt: "<<bssn::BSSN_CFL_FACTOR*((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth))))<<std::endl; std::cout<<"ts mode: "<<ts_mode<<std::endl; std::cout<<"==============================================================================================================="<<std::endl; } bssn::BSSN_RK45_TIME_STEP_SIZE=bssn::BSSN_CFL_FACTOR*((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth)))); tmpNodes.clear(); // enable block adaptivity disable remesh. 
bssn::BSSN_ENABLE_BLOCK_ADAPTIVITY=1; ot::Mesh* pMesh = mesh; if(bssn::BSSN_RESTORE_SOLVER==0) pMesh = bssn::weakScalingReMesh(mesh,npes); if(ts_mode == 0) { }else if(ts_mode == 1) { bssn::BSSNCtxGPU * bssnCtx = new bssn::BSSNCtxGPU(pMesh); ts::ETS<DendroScalar,bssn::BSSNCtxGPU>* ets = new ts::ETS<DendroScalar,bssn::BSSNCtxGPU>(bssnCtx); ets->set_evolve_vars(bssnCtx->get_evolution_vars()); if((RKType)bssn::BSSN_RK_TYPE == RKType::RK3) ets->set_ets_coefficients(ts::ETSType::RK3); else if((RKType)bssn::BSSN_RK_TYPE == RKType::RK4) ets->set_ets_coefficients(ts::ETSType::RK4); else if((RKType)bssn::BSSN_RK_TYPE == RKType::RK45) ets->set_ets_coefficients(ts::ETSType::RK5); ets->init(); #if defined __PROFILE_CTX__ && defined __PROFILE_ETS__ std::ofstream outfile; char fname [256]; sprintf(fname, "bssnCtxGPU_WS_%d.txt",npes); if(!rank) { outfile.open(fname, std::ios_base::app); time_t now = time(0); // convert now to string form char* dt = ctime(&now); outfile <<"============================================================"<<std::endl; outfile << "Current time : "<<dt<<" --- "<<std::endl; outfile <<"============================================================"<<std::endl; } ets->init_pt(); bssnCtx->reset_pt(); ets->dump_pt(outfile); //bssnCtx->dump_pt(outfile); #endif cudaStream_t s_gw; cudaStreamCreate(&s_gw); ts::TSInfo ts_gw_output; ts::TSInfo ts_curr; bool is_gw_written=false; bool is_merge_executed =false; double t1 = MPI_Wtime(); while(ets->curr_time() < bssn::BSSN_RK_TIME_END) { const DendroIntL step = ets->curr_step(); const DendroScalar time = ets->curr_time(); bssn::BSSN_CURRENT_RK_COORD_TIME = time; bssn::BSSN_CURRENT_RK_STEP = step; // if(time < 200) // bssn::BSSN_REFINEMENT_MODE = RefinementMode::BH_LOC; // else // bssn::BSSN_REFINEMENT_MODE = RefinementMode::WAMR; const bool isActive = ets->is_active(); const unsigned int rank_global = ets->get_global_rank(); const bool is_merged = bssnCtx->is_bh_merged(0.1); if(is_merged && !is_merge_executed) { bssn::BSSN_REMESH_TEST_FREQ = 3 * bssn::BSSN_REMESH_TEST_FREQ_AFTER_MERGER; bssn::BSSN_MINDEPTH=5; bssn::BSSN_GW_EXTRACT_FREQ = bssn::BSSN_GW_EXTRACT_FREQ_AFTER_MERGER; bssn::BSSN_REFINEMENT_MODE = RefinementMode::WAMR; }/*else { //bssn::BSSN_REFINEMENT_MODE = RefinementMode::BH_LOC; }*/ /*if((step % bssn::BSSN_GW_EXTRACT_FREQ) == 0 ) { if(!rank_global) std::cout<<"[ETS] : Executing step : "<<ets->curr_step()<<"\tcurrent time :"<<ets->curr_time()<<"\t dt:"<<ets->ts_size()<<"\t"<<std::endl; // 03/23/22 : this works only because we use defult stream to for the evolution. Default stream is special and synchronous with all other streams. bssnCtx->device_to_host_async(s_gw); ts_gw_output=bssnCtx->get_ts_info(); is_gw_written=false; if( (step % bssn::BSSN_REMESH_TEST_FREQ) == 0 ) { cudaStreamSynchronize(s_gw); bool isRemesh = bssnCtx->is_remesh(); if(isRemesh) { if(!rank_global) std::cout<<"[ETS] : Remesh is triggered. 
\n"; bssnCtx->remesh_and_gridtransfer(bssn::BSSN_DENDRO_GRAIN_SZ, bssn::BSSN_LOAD_IMB_TOL,bssn::BSSN_SPLIT_FIX); bssn::deallocate_bssn_deriv_workspace(); bssn::allocate_bssn_deriv_workspace(bssnCtx->get_mesh(),1); ets->sync_with_mesh(); // correct timestep size ot::Mesh* pmesh = bssnCtx->get_mesh(); unsigned int lmin, lmax; pmesh->computeMinMaxLevel(lmin,lmax); if(!pmesh->getMPIRank()) printf("post merger grid level = (%d, %d)\n",lmin,lmax); bssn::BSSN_RK45_TIME_STEP_SIZE=bssn::BSSN_CFL_FACTOR*((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth)))); ts::TSInfo ts_in = bssnCtx->get_ts_info(); ts_in._m_uiTh = bssn::BSSN_RK45_TIME_STEP_SIZE; bssnCtx->set_ts_info(ts_in); } } } if((step % bssn::BSSN_GW_EXTRACT_FREQ) == (bssn::BSSN_GW_EXTRACT_FREQ-1)) cudaStreamSynchronize(s_gw); if((!is_gw_written) && (cudaStreamQuery(s_gw) == cudaSuccess)) { ts_curr = bssnCtx->get_ts_info(); bssnCtx->set_ts_info(ts_gw_output); bssnCtx->terminal_output(); bssnCtx->write_vtu(); bssnCtx->evolve_bh_loc(bssnCtx->get_evolution_vars_cpu(),ets->ts_size()*bssn::BSSN_GW_EXTRACT_FREQ); if( (step % bssn::BSSN_CHECKPT_FREQ) == 0 ) bssnCtx->write_checkpt(); bssnCtx->set_ts_info(ts_curr); is_gw_written=true; }*/ ets->evolve(); } #if defined __PROFILE_CTX__ && defined __PROFILE_ETS__ ets->dump_pt(outfile); //bssnCtx->dump_pt(outfile); #endif double t2 = MPI_Wtime()-t1; double t2_g; par::Mpi_Allreduce(&t2,&t2_g,1,MPI_MAX,ets->get_global_comm()); if(!(ets->get_global_rank())) std::cout<<" ETS time (max) : "<<t2_g<<std::endl; if(bssn::BSSN_RESTORE_SOLVER==0) { if(pMesh == mesh) delete mesh; else { delete mesh; delete pMesh; } }else { delete pMesh; } delete bssnCtx; delete ets; }else { if(!rank) RAISE_ERROR("invalid ts mode : "<<ts_mode<<"specifed"); MPI_Abort(comm,0); } return 0; } int main (int argc, char** argv) { // 0- NUTS 1-UTS unsigned int ts_mode=0; if(argc<2) { std::cout<<"Usage: "<<argv[0]<<"paramFile TSMode(0){0-Spatially Adaptive Time Stepping(SATS, "<<GRN<<"default"<<NRM<<") , 1- Uniform Time Stepping. 
}"<<std::endl; return 0; } if(argc>2) ts_mode = std::atoi(argv[2]); MPI_Init(&argc,&argv); MPI_Comm comm=MPI_COMM_WORLD; int rank,npes; MPI_Comm_rank(comm,&rank); MPI_Comm_size(comm,&npes); // Print out CMAKE options if (!rank) { #ifdef BSSN_COMPUTE_CONSTRAINTS std::cout<<GRN<<" Compiled with BSSN_COMPUTE_CONSTRAINTS"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_COMPUTE_CONSTRAINTS"<<NRM<<std::endl; #endif #ifdef BSSN_ENABLE_VTU_CONSTRAINT_OUTPUT std::cout<<GRN<<" Compiled with BSSN_ENABLE_VTU_CONSTRAINT_OUTPUT"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_ENABLE_VTU_CONSTRAINT_OUTPUT"<<NRM<<std::endl; #endif #ifdef BSSN_ENABLE_VTU_OUTPUT std::cout<<GRN<<" Compiled with BSSN_ENABLE_VTU_OUTPUT"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_ENABLE_VTU_OUTPUT"<<NRM<<std::endl; #endif #ifdef BSSN_ETA_FUNCTION std::cout<<GRN<<" Compiled with BSSN_ETA_FUNCTION"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_ETA_FUNCTION"<<NRM<<std::endl; #endif #ifdef BSSN_EXTRACT_BH_LOCATIONS std::cout<<GRN<<" Compiled with BSSN_EXTRACT_BH_LOCATIONS"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_EXTRACT_BH_LOCATIONS"<<NRM<<std::endl; #endif #ifdef BSSN_EXTRACT_GRAVITATIONAL_WAVES std::cout<<GRN<<" Compiled with BSSN_EXTRACT_GRAVITATIONAL_WAVES"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_EXTRACT_GRAVITATIONAL_WAVES"<<NRM<<std::endl; #endif #ifdef BSSN_EXTRACT_GRAVITATIONAL_WAVES std::cout<<GRN<<" Compiled with BSSN_EXTRACT_GRAVITATIONAL_WAVES"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_EXTRACT_GRAVITATIONAL_WAVES"<<NRM<<std::endl; #endif #ifdef BSSN_GAUGE_ROCHESTER std::cout<<GRN<<" Compiled with BSSN_GAUGE_ROCHESTER"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_GAUGE_ROCHESTER"<<NRM<<std::endl; #endif #ifdef BSSN_KERR_SCHILD_TEST std::cout<<GRN<<" Compiled with BSSN_KERR_SCHILD_TEST"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_KERR_SCHILD_TEST"<<NRM<<std::endl; #endif #ifdef BSSN_REFINE_BASE_EH std::cout<<GRN<<" Compiled with BSSN_REFINE_BASE_EH"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without BSSN_REFINE_BASE_EH"<<NRM<<std::endl; #endif #ifdef USE_FD_INTERP_FOR_UNZIP std::cout<<GRN<<" Compiled with USE_FD_INTERP_FOR_UNZIP"<<NRM<<std::endl; #else std::cout<<RED<<" Compiled without USE_FD_INTERP_FOR_UNZIP"<<NRM<<std::endl; #endif } //1 . read the parameter file. if(!rank) std::cout<<" reading parameter file :"<<argv[1]<<std::endl; bssn::readParamFile(argv[1],comm); int root = std::min(1,npes-1); bssn::dumpParamFile(std::cout,root,comm); _InitializeHcurve(bssn::BSSN_DIM); m_uiMaxDepth=bssn::BSSN_MAXDEPTH; if(bssn::BSSN_NUM_VARS%bssn::BSSN_ASYNC_COMM_K!=0) { if(!rank) std::cout<<"[overlap communication error]: total BSSN_NUM_VARS: "<<bssn::BSSN_NUM_VARS<<" is not divisable by BSSN_ASYNC_COMM_K: "<<bssn::BSSN_ASYNC_COMM_K<<std::endl; MPI_Abort(comm,0); } if(bssn::BSSN_GW_EXTRACT_FREQ> bssn::BSSN_IO_OUTPUT_FREQ) { if(!rank) std::cout<<" BSSN_GW_EXTRACT_FREQ should be less BSSN_IO_OUTPUT_FREQ "<<std::endl; MPI_Abort(comm,0); } //2. generate the initial grid. 
std::vector<ot::TreeNode> tmpNodes; std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){bssn::punctureData(x,y,z,var);}; std::function<double(double,double,double)> f_init_alpha=[](double x,double y,double z){ double var[24]; bssn::punctureData(x,y,z,var); return var[0];}; //std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){bssn::KerrSchildData(x,y,z,var);}; const unsigned int interpVars=bssn::BSSN_NUM_VARS; unsigned int varIndex[interpVars]; for(unsigned int i=0;i<bssn::BSSN_NUM_VARS;i++) varIndex[i]=i; /*varIndex[0]=bssn::VAR::U_ALPHA; varIndex[1]=bssn::VAR::U_CHI;*/ DendroIntL localSz,globalSz; double t_stat; double t_stat_g[3]; const unsigned int NUM_WARM_UP=2; const unsigned int NUM_STEPS =1; std::ofstream outfile; char fname [256]; sprintf(fname, "bssnCtx_WS_%d.txt",npes); if(!rank) { outfile.open(fname, std::ios_base::app); time_t now = time(0); // convert now to string form char* dt = ctime(&now); outfile <<"============================================================"<<std::endl; outfile << "Current time : "<<dt<<" --- "<<std::endl; outfile <<"============================================================"<<std::endl; } bssn_driver(comm,NUM_STEPS,NUM_WARM_UP,outfile,1); if(!rank) outfile.close(); #ifdef RUN_WEAK_SCALING if(!rank) std::cout<<"======================================================================"<<std::endl; if(!rank) std::cout<<" Weak Scaling Run Begin. "<<std::endl; if(!rank) std::cout<<"======================================================================"<<std::endl; int proc_group = 0; int min_np = 2; for (int i = npes; rank < i && i >= min_np; i = i >> 1) proc_group++; MPI_Comm comm_ws; MPI_Comm_split(comm, proc_group, rank, &comm_ws); MPI_Comm_rank(comm_ws, &rank); MPI_Comm_size(comm_ws, &npes); if(!rank) outfile.open(fname, std::ios_base::app); MPI_Barrier(comm_ws); bssn_driver(comm_ws,NUM_STEPS,NUM_WARM_UP,outfile,1); MPI_Barrier(comm_ws); if(!rank) outfile.close(); MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &npes); MPI_Barrier(comm); if(!rank) std::cout<<"======================================================================"<<std::endl; if(!rank) std::cout<<" Weak Scaling Run Complete. "<<std::endl; if(!rank) std::cout<<"======================================================================"<<std::endl; #endif MPI_Finalize(); return 0; }
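// --------------------------------------------------------------------------
// Hedged sketch (editorial, not part of the original file): the commented-out
// GW-extraction block in bssn_driver() overlaps a device-to-host copy on a
// side stream with evolution work, then polls the stream before consuming the
// host copy; the original notes that it relies on the legacy default stream
// synchronising with other streams. The minimal form of that pattern, with
// hypothetical buffers h_buf/d_buf, looks like this (h_buf should be
// page-locked, e.g. from cudaMallocHost, for the copy to be truly asynchronous):
#include <cuda_runtime.h>
#include <cstddef>

static void overlap_copy_with_work(double* h_buf, const double* d_buf, std::size_t n)
{
    cudaStream_t side;
    cudaStreamCreate(&side);

    // enqueue the copy; the host thread does not wait here
    cudaMemcpyAsync(h_buf, d_buf, n * sizeof(double), cudaMemcpyDeviceToHost, side);

    bool consumed = false;
    while (!consumed) {
        // ... launch / advance other work on the default stream ...
        if (cudaStreamQuery(side) == cudaSuccess) {   // copy has completed
            // ... use h_buf on the host (I/O, diagnostics, checkpointing) ...
            consumed = true;
        }
    }
    cudaStreamDestroy(side);
}
// --------------------------------------------------------------------------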
9685da3a20ba7598fcf995997cd2614f1e0b491a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "smc.h" extern void smc_move( Phase *phase,GPU_info *gpu_info,MPI_info *mpi_info,int step){ for(int i=0;i<step;i++){ for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); dim3 grid(gpu_info->polymerNx,gpu_info->polymerNy,gpu_info->polymerNz); //printf("cite gpu %d %d %d %d\n",grid.x,grid.y,grid.z,phase->MaxThreadPolymer); //grid,phase->MaxThreadPolymer,0,gpu_info->stream[gpu_index] hipLaunchKernelGGL(( mc_polymer_move_arr), dim3(grid),dim3(phase->MaxThreadPolymer),0,gpu_info->stream[gpu_index], phase->phase_info_gpu[gpu_index],phase->poly_arch[gpu_index],phase->pos[gpu_index],gpu_info->state[gpu_index],phase->omega_field_unified[gpu_index]); checkCudaErrors( hipDeviceSynchronize()); //checkCudaErrors(hipGetLastError()); //hipStreamSynchronize(gpu_info->stream[gpu_index]); /* if(i%100==1&&gpu_index==0){ double end_dis=0; for(int polymer_index=0;polymer_index<phase->n_polymers_per_gpu;polymer_index++){ double x=phase->pos[gpu_index][polymer_index*32*3]; double y=phase->pos[gpu_index][polymer_index*32*3+1]; double z=phase->pos[gpu_index][polymer_index*32*3+2]; double x1=phase->pos[gpu_index][(polymer_index*32+31)*3]; double y1=phase->pos[gpu_index][(polymer_index*32+31)*3+1]; double z1=phase->pos[gpu_index][(polymer_index*32+31)*3+2]; end_dis+=(x-x1)*(x-x1)+(y-y1)*(y-y1)+(z-z1)*(z-z1); }//end for polymer_index end_dis=end_dis/(phase->n_polymers_per_gpu); printf(" %d end to end dis %g on gpu %d\n",i,end_dis,gpu_index); }//end if step */ }//end gpu_index }//loop end step }// smc_move end extern void test_stream(GPU_info *gpu_info){ dim3 grid(2,2,20); double *pa; hipStream_t stream1,stream2,stream3,stream4; checkCudaErrors(hipSetDevice(0)); checkCudaErrors(hipMalloc(&(pa),10000 )); checkCudaErrors( hipDeviceSynchronize()); hipStreamCreate ( &stream1) ; hipStreamCreate ( &stream2) ; hipStreamCreate ( &stream3) ; hipStreamCreate ( &stream4) ; hipStreamCreateWithFlags(&stream1,hipStreamNonBlocking); hipStreamCreateWithFlags(&stream2,hipStreamNonBlocking); hipStreamCreateWithFlags(&stream3,hipStreamNonBlocking); hipStreamCreateWithFlags(&stream4,hipStreamNonBlocking); checkCudaErrors(hipSetDevice(0)); hipLaunchKernelGGL(( test_stream), dim3(grid),dim3(1024),0,stream1, pa); hipLaunchKernelGGL(( test_stream), dim3(grid),dim3(1024),0,stream2, pa); hipLaunchKernelGGL(( test_stream), dim3(grid),dim3(1024),0,stream3, pa); hipLaunchKernelGGL(( test_stream), dim3(grid),dim3(1024),0,stream4, pa); hipStreamSynchronize ( stream1 ); hipStreamSynchronize ( stream2 ); hipStreamSynchronize ( stream3 ); hipStreamSynchronize ( stream4 ); checkCudaErrors(hipSetDevice(0)); checkCudaErrors( hipDeviceSynchronize()); } extern void test_Phase_info_gpu(int cpu_index,int gpu_index,Phase *phase,GPU_info *gpu_info,MPI_info *mpi_info){ if(mpi_info->current_node==cpu_index){ printf("----------Program phase data on node %d GPU %d is tested! 
\n",cpu_index,gpu_index); checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); printf("chi N matrix:\n"); for(int i=0;i<phase->n_mono_types;i++){ for(int j=0;j<phase->n_mono_types;j++) printf("%g ",phase->phase_info_gpu[gpu_index]->xn[i+j*phase->n_mono_types]); printf("\n"); } printf("nx %d ny %d nz %d\n",phase->phase_info_gpu[gpu_index]->nx,phase->phase_info_gpu[gpu_index]->ny,phase->phase_info_gpu[gpu_index]->nz); printf("total cells %ld \n",phase->phase_info_gpu[gpu_index]->n_cells); printf("Lx %g Ly %g Lz %g\n",phase->phase_info_gpu[gpu_index]->Lx,phase->phase_info_gpu[gpu_index]->Ly,phase->phase_info_gpu[gpu_index]->Lz); printf("iLx %g iLy %g iLz %g\n",phase->phase_info_gpu[gpu_index]->iLx,phase->phase_info_gpu[gpu_index]->iLy,phase->phase_info_gpu[gpu_index]->iLz); printf("dx %g dy %g dz %g\n",phase->phase_info_gpu[gpu_index]->dx,phase->phase_info_gpu[gpu_index]->dy,phase->phase_info_gpu[gpu_index]->dz); printf("n_polymers: %d \n",phase->phase_info_gpu[gpu_index]->n_polymers); printf("n_polymers per node : %d \n",phase->phase_info_gpu[gpu_index]->n_polymers_per_node); printf("n_polymers per gpu : %d \n",phase->phase_info_gpu[gpu_index]->n_polymers_per_gpu); printf("polymer_type_number: %d \n",phase->phase_info_gpu[gpu_index]->polymer_type_number); printf("n_polymer_type: "); for(int i=0;i<phase->phase_info_gpu[gpu_index]->polymer_type_number;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->n_polymer_type[i]); printf("\n"); printf("n_polymer_type_node: "); for(int i=0;i<phase->phase_info_gpu[gpu_index]->polymer_type_number;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->n_polymers_type_per_node[i]); printf("\n"); printf("n_polymer_type_gpu: "); for(int i=0;i<phase->phase_info_gpu[gpu_index]->polymer_type_number;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->n_polymers_type_per_gpu[i]); printf("\n"); printf("n_mono_types %d \n",phase->phase_info_gpu[gpu_index]->n_mono_types); printf("num_all_beads: %d \n",phase->phase_info_gpu[gpu_index]->num_all_beads); printf("num_all_beads_per_node: %d \n",phase->phase_info_gpu[gpu_index]->num_all_beads_per_node); printf("num_all_beads_per_gpu: %d \n",phase->phase_info_gpu[gpu_index]->num_all_beads_per_gpu); printf("n_bead_type_gpu: "); for(int i=0;i<phase->phase_info_gpu[gpu_index]->n_mono_types;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->num_bead_type[i]); printf("\n"); printf("num_all_beads_per_node: "); for(int i=0;i<phase->phase_info_gpu[gpu_index]->n_mono_types;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->num_bead_type_per_node[i]); printf("\n"); printf("num_all_beads_per_gpu: "); for(int i=0;i<phase->phase_info_gpu[gpu_index]->n_mono_types;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->num_bead_type_per_gpu[i]); printf("\n"); printf("polymer_basis_gpu: "); for(int i=0;i<=phase->phase_info_gpu[gpu_index]->polymer_type_number;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->polymer_basis_gpu[i]); printf("\n"); printf("bead_basis_gpu: "); for(int i=0;i<=phase->phase_info_gpu[gpu_index]->n_mono_types;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->monomer_poly_basis_gpu[i]); printf("\n"); printf("field_scaling_type: "); for(int i=0;i<=phase->phase_info_gpu[gpu_index]->n_mono_types;i++)printf("%g ",phase->phase_info_gpu[gpu_index]->field_scaling_type[i]); printf("\n"); printf("inverse_refbeads: %g\n",phase->phase_info_gpu[gpu_index]->inverse_refbeads); printf("refbeads: %d\n",phase->phase_info_gpu[gpu_index]->reference_Nbeads); printf("harmonic_normb: %g\n",phase->phase_info_gpu[gpu_index]->harmonic_normb); 
printf("current_node: %d\n",phase->phase_info_gpu[gpu_index]->current_node); printf("total_nodes: %d\n",phase->phase_info_gpu[gpu_index]->total_nodes); printf("MaxThreadDensity: %d\n",phase->phase_info_gpu[gpu_index]->MaxThreadDensity); printf("MaxThreadPolymer: %d\n",phase->phase_info_gpu[gpu_index]->MaxThreadPolymer); printf("----------Program structure data on node %d GPU %d is tested! \n",cpu_index,gpu_index); for(int poly_type=0;poly_type<phase->phase_info_gpu[gpu_index]->polymer_type_number;poly_type++){ printf("poly index %d \n",poly_type); printf("poly_length %d \n",phase->poly_arch[gpu_index][poly_type].poly_length); for(int i=0;i<phase->phase_info_gpu[gpu_index]->n_mono_types;i++) printf("%d ",phase->poly_arch[gpu_index][poly_type].mono_type_length[i]); printf("\n"); printf("length_bond %g\n",phase->poly_arch[gpu_index][poly_type].length_bond); printf("Mono type\n"); for(int i=0;i<phase->poly_arch[gpu_index][poly_type].poly_length;i++) printf("%d ",phase->poly_arch[gpu_index][poly_type].Monotype[i]); printf("\n"); printf("\n"); printf("Structure of polymer!\n"); for(int i=0;i<phase->poly_arch[gpu_index][poly_type].poly_length;i++){ for(int j=0;j<phase->poly_arch[gpu_index][poly_type].poly_length;j++) printf("%d ",phase->poly_arch[gpu_index][poly_type].connection[i+j*phase->poly_arch[gpu_index][poly_type].poly_length]); printf("\n"); } printf("\n"); for(int i=0;i<phase->poly_arch[gpu_index][poly_type].poly_length;i++) printf("%d ",phase->poly_arch[gpu_index][poly_type].neigh_num[i]); printf("\n"); printf("reference_Nbeads %d \n",phase->poly_arch[gpu_index][poly_type].reference_Nbeads); for(int i=0;i<phase->poly_arch[gpu_index][poly_type].poly_length;i++){ for(int j=0;j<phase->poly_arch[gpu_index][poly_type].neigh_num[i];j++) printf("%d ",phase->poly_arch[gpu_index][poly_type].conection_list[i][j]); printf("\n"); } printf("--------------------\n"); }// end for poly_type }//end if cpu chosen }
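// --------------------------------------------------------------------------
// Editorial note (hedged): apart from the hipify banner and the runtime header,
// the main structural difference between this hipified file and its CUDA
// counterpart below is the kernel-launch syntax. hipify rewrites the
// triple-chevron launch
//     kernel<<<gridDim, blockDim, sharedMemBytes, stream>>>(args...);
// into the portable macro form
//     hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMemBytes, stream, args...);
// keeping the same four launch parameters in the same order, followed by the
// kernel arguments. Runtime entry points are renamed one-for-one
// (cudaSetDevice -> hipSetDevice, cudaStreamCreateWithFlags ->
// hipStreamCreateWithFlags, cudaDeviceSynchronize -> hipDeviceSynchronize, ...).
// --------------------------------------------------------------------------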
9685da3a20ba7598fcf995997cd2614f1e0b491a.cu
#include "smc.h" extern void smc_move( Phase *phase,GPU_info *gpu_info,MPI_info *mpi_info,int step){ for(int i=0;i<step;i++){ for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); dim3 grid(gpu_info->polymerNx,gpu_info->polymerNy,gpu_info->polymerNz); //printf("cite gpu %d %d %d %d\n",grid.x,grid.y,grid.z,phase->MaxThreadPolymer); //grid,phase->MaxThreadPolymer,0,gpu_info->stream[gpu_index] mc_polymer_move_arr<<<grid,phase->MaxThreadPolymer,0,gpu_info->stream[gpu_index]>>>(phase->phase_info_gpu[gpu_index],phase->poly_arch[gpu_index],phase->pos[gpu_index],gpu_info->state[gpu_index],phase->omega_field_unified[gpu_index]); checkCudaErrors( cudaDeviceSynchronize()); //checkCudaErrors(cudaGetLastError()); //cudaStreamSynchronize(gpu_info->stream[gpu_index]); /* if(i%100==1&&gpu_index==0){ double end_dis=0; for(int polymer_index=0;polymer_index<phase->n_polymers_per_gpu;polymer_index++){ double x=phase->pos[gpu_index][polymer_index*32*3]; double y=phase->pos[gpu_index][polymer_index*32*3+1]; double z=phase->pos[gpu_index][polymer_index*32*3+2]; double x1=phase->pos[gpu_index][(polymer_index*32+31)*3]; double y1=phase->pos[gpu_index][(polymer_index*32+31)*3+1]; double z1=phase->pos[gpu_index][(polymer_index*32+31)*3+2]; end_dis+=(x-x1)*(x-x1)+(y-y1)*(y-y1)+(z-z1)*(z-z1); }//end for polymer_index end_dis=end_dis/(phase->n_polymers_per_gpu); printf(" %d end to end dis %g on gpu %d\n",i,end_dis,gpu_index); }//end if step */ }//end gpu_index }//loop end step }// smc_move end extern void test_stream(GPU_info *gpu_info){ dim3 grid(2,2,20); double *pa; cudaStream_t stream1,stream2,stream3,stream4; checkCudaErrors(cudaSetDevice(0)); checkCudaErrors(cudaMalloc(&(pa),10000 )); checkCudaErrors( cudaDeviceSynchronize()); cudaStreamCreate ( &stream1) ; cudaStreamCreate ( &stream2) ; cudaStreamCreate ( &stream3) ; cudaStreamCreate ( &stream4) ; cudaStreamCreateWithFlags(&stream1,cudaStreamNonBlocking); cudaStreamCreateWithFlags(&stream2,cudaStreamNonBlocking); cudaStreamCreateWithFlags(&stream3,cudaStreamNonBlocking); cudaStreamCreateWithFlags(&stream4,cudaStreamNonBlocking); checkCudaErrors(cudaSetDevice(0)); test_stream<<<grid,1024,0,stream1>>>(pa); test_stream<<<grid,1024,0,stream2>>>(pa); test_stream<<<grid,1024,0,stream3>>>(pa); test_stream<<<grid,1024,0,stream4>>>(pa); cudaStreamSynchronize ( stream1 ); cudaStreamSynchronize ( stream2 ); cudaStreamSynchronize ( stream3 ); cudaStreamSynchronize ( stream4 ); checkCudaErrors(cudaSetDevice(0)); checkCudaErrors( cudaDeviceSynchronize()); } extern void test_Phase_info_gpu(int cpu_index,int gpu_index,Phase *phase,GPU_info *gpu_info,MPI_info *mpi_info){ if(mpi_info->current_node==cpu_index){ printf("----------Program phase data on node %d GPU %d is tested! 
\n",cpu_index,gpu_index); checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); printf("chi N matrix:\n"); for(int i=0;i<phase->n_mono_types;i++){ for(int j=0;j<phase->n_mono_types;j++) printf("%g ",phase->phase_info_gpu[gpu_index]->xn[i+j*phase->n_mono_types]); printf("\n"); } printf("nx %d ny %d nz %d\n",phase->phase_info_gpu[gpu_index]->nx,phase->phase_info_gpu[gpu_index]->ny,phase->phase_info_gpu[gpu_index]->nz); printf("total cells %ld \n",phase->phase_info_gpu[gpu_index]->n_cells); printf("Lx %g Ly %g Lz %g\n",phase->phase_info_gpu[gpu_index]->Lx,phase->phase_info_gpu[gpu_index]->Ly,phase->phase_info_gpu[gpu_index]->Lz); printf("iLx %g iLy %g iLz %g\n",phase->phase_info_gpu[gpu_index]->iLx,phase->phase_info_gpu[gpu_index]->iLy,phase->phase_info_gpu[gpu_index]->iLz); printf("dx %g dy %g dz %g\n",phase->phase_info_gpu[gpu_index]->dx,phase->phase_info_gpu[gpu_index]->dy,phase->phase_info_gpu[gpu_index]->dz); printf("n_polymers: %d \n",phase->phase_info_gpu[gpu_index]->n_polymers); printf("n_polymers per node : %d \n",phase->phase_info_gpu[gpu_index]->n_polymers_per_node); printf("n_polymers per gpu : %d \n",phase->phase_info_gpu[gpu_index]->n_polymers_per_gpu); printf("polymer_type_number: %d \n",phase->phase_info_gpu[gpu_index]->polymer_type_number); printf("n_polymer_type: "); for(int i=0;i<phase->phase_info_gpu[gpu_index]->polymer_type_number;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->n_polymer_type[i]); printf("\n"); printf("n_polymer_type_node: "); for(int i=0;i<phase->phase_info_gpu[gpu_index]->polymer_type_number;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->n_polymers_type_per_node[i]); printf("\n"); printf("n_polymer_type_gpu: "); for(int i=0;i<phase->phase_info_gpu[gpu_index]->polymer_type_number;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->n_polymers_type_per_gpu[i]); printf("\n"); printf("n_mono_types %d \n",phase->phase_info_gpu[gpu_index]->n_mono_types); printf("num_all_beads: %d \n",phase->phase_info_gpu[gpu_index]->num_all_beads); printf("num_all_beads_per_node: %d \n",phase->phase_info_gpu[gpu_index]->num_all_beads_per_node); printf("num_all_beads_per_gpu: %d \n",phase->phase_info_gpu[gpu_index]->num_all_beads_per_gpu); printf("n_bead_type_gpu: "); for(int i=0;i<phase->phase_info_gpu[gpu_index]->n_mono_types;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->num_bead_type[i]); printf("\n"); printf("num_all_beads_per_node: "); for(int i=0;i<phase->phase_info_gpu[gpu_index]->n_mono_types;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->num_bead_type_per_node[i]); printf("\n"); printf("num_all_beads_per_gpu: "); for(int i=0;i<phase->phase_info_gpu[gpu_index]->n_mono_types;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->num_bead_type_per_gpu[i]); printf("\n"); printf("polymer_basis_gpu: "); for(int i=0;i<=phase->phase_info_gpu[gpu_index]->polymer_type_number;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->polymer_basis_gpu[i]); printf("\n"); printf("bead_basis_gpu: "); for(int i=0;i<=phase->phase_info_gpu[gpu_index]->n_mono_types;i++)printf("%d ",phase->phase_info_gpu[gpu_index]->monomer_poly_basis_gpu[i]); printf("\n"); printf("field_scaling_type: "); for(int i=0;i<=phase->phase_info_gpu[gpu_index]->n_mono_types;i++)printf("%g ",phase->phase_info_gpu[gpu_index]->field_scaling_type[i]); printf("\n"); printf("inverse_refbeads: %g\n",phase->phase_info_gpu[gpu_index]->inverse_refbeads); printf("refbeads: %d\n",phase->phase_info_gpu[gpu_index]->reference_Nbeads); printf("harmonic_normb: 
%g\n",phase->phase_info_gpu[gpu_index]->harmonic_normb); printf("current_node: %d\n",phase->phase_info_gpu[gpu_index]->current_node); printf("total_nodes: %d\n",phase->phase_info_gpu[gpu_index]->total_nodes); printf("MaxThreadDensity: %d\n",phase->phase_info_gpu[gpu_index]->MaxThreadDensity); printf("MaxThreadPolymer: %d\n",phase->phase_info_gpu[gpu_index]->MaxThreadPolymer); printf("----------Program structure data on node %d GPU %d is tested! \n",cpu_index,gpu_index); for(int poly_type=0;poly_type<phase->phase_info_gpu[gpu_index]->polymer_type_number;poly_type++){ printf("poly index %d \n",poly_type); printf("poly_length %d \n",phase->poly_arch[gpu_index][poly_type].poly_length); for(int i=0;i<phase->phase_info_gpu[gpu_index]->n_mono_types;i++) printf("%d ",phase->poly_arch[gpu_index][poly_type].mono_type_length[i]); printf("\n"); printf("length_bond %g\n",phase->poly_arch[gpu_index][poly_type].length_bond); printf("Mono type\n"); for(int i=0;i<phase->poly_arch[gpu_index][poly_type].poly_length;i++) printf("%d ",phase->poly_arch[gpu_index][poly_type].Monotype[i]); printf("\n"); printf("\n"); printf("Structure of polymer!\n"); for(int i=0;i<phase->poly_arch[gpu_index][poly_type].poly_length;i++){ for(int j=0;j<phase->poly_arch[gpu_index][poly_type].poly_length;j++) printf("%d ",phase->poly_arch[gpu_index][poly_type].connection[i+j*phase->poly_arch[gpu_index][poly_type].poly_length]); printf("\n"); } printf("\n"); for(int i=0;i<phase->poly_arch[gpu_index][poly_type].poly_length;i++) printf("%d ",phase->poly_arch[gpu_index][poly_type].neigh_num[i]); printf("\n"); printf("reference_Nbeads %d \n",phase->poly_arch[gpu_index][poly_type].reference_Nbeads); for(int i=0;i<phase->poly_arch[gpu_index][poly_type].poly_length;i++){ for(int j=0;j<phase->poly_arch[gpu_index][poly_type].neigh_num[i];j++) printf("%d ",phase->poly_arch[gpu_index][poly_type].conection_list[i][j]); printf("\n"); } printf("--------------------\n"); }// end for poly_type }//end if cpu chosen }
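// --------------------------------------------------------------------------
// Hedged sketch (editorial, not part of the original file): smc_move() above
// drives one kernel per GPU by selecting each device in turn and launching on
// that device's stream. Stripped of the application data structures, and with
// a hypothetical kernel do_work, the per-device pattern looks like the sketch
// below. Note the original calls cudaDeviceSynchronize() inside the launch
// loop, which serialises the GPUs; synchronising after all launches (as here)
// lets the devices run concurrently.
#include <cuda_runtime.h>

__global__ void do_work(double* data) { (void)data; /* placeholder body */ }

static void launch_on_all_gpus(double** per_gpu_data, cudaStream_t* per_gpu_stream,
                               const int* device_ids, int n_gpus,
                               dim3 grid, dim3 block)
{
    for (int g = 0; g < n_gpus; ++g) {
        cudaSetDevice(device_ids[g]);                          // make GPU g current
        do_work<<<grid, block, 0, per_gpu_stream[g]>>>(per_gpu_data[g]);
    }
    for (int g = 0; g < n_gpus; ++g) {                         // wait for every device
        cudaSetDevice(device_ids[g]);
        cudaStreamSynchronize(per_gpu_stream[g]);
    }
}
// --------------------------------------------------------------------------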
ab9c41f31e9be1277b3002da28c0bf547a31e9df.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <iostream> #include <fstream> #include <sstream> #include <cstring> #include <iomanip> #include <numeric> #include <stdlib.h> #include <math.h> #define min(a,b) a<b ? a : b #define max(a,b) a>b ? a : b #define DEFL using namespace std; typedef float T; typedef struct { unsigned int blocks; unsigned int blockSize; unsigned int blocksZ; unsigned int Nx; unsigned int Ny; unsigned int Nz; unsigned int size; unsigned int steps; unsigned int maxiter; T dt; T maxres; unsigned int nRowsZ; unsigned int nDV; unsigned int NxZ; } Parameters; typedef struct { T rho; T cp; T k; } MaterialProperties; Parameters params; MaterialProperties steel; MaterialProperties Ag; MaterialProperties MgO; MaterialProperties inconel; MaterialProperties NiCr; #include "cpuFunctions.h" //#include "cpuFunctionsDeflation.h" #include "cudaFunctions.h" //#include "cudaFunctionsDeflation.h" int main(void) { // Parameters params.blocks = 256; params.blockSize = 128; params.Nx = 32; params.Ny = 32; params.Nz = 128; params.size = params.Nx*params.Ny*params.Nz + 2*params.Nx*params.Ny; params.steps = 1; params.maxiter = 1000000; params.dt = 1./300.; // 0.0033333333333333; params.maxres = 1e-4; params.nRowsZ = 4; // number of rows/columns for one deflation vector params.nDV = (params.Nx*params.Ny) / (params.nRowsZ*params.nRowsZ); params.NxZ = params.Nx/params.nRowsZ; // number of course cells in a row // steel steel.rho = 7610.0; // 7700 steel.cp = 545.0; // 560 steel.k = 21.0; // Ag Ag.rho = 8957.0; Ag.cp = 362.0; // 368 Ag.k = 120; // 111.5 // MgO MgO.rho = 3150.0; MgO.cp = 1110.0; // 1140 MgO.k = 11.5; // 10 // inconel inconel.rho = 8470.0; inconel.cp = 500.0; // 520 inconel.k = 20.5; // NiCr NiCr.rho = 8200.0; NiCr.cp = 528.0; NiCr.k = 24.5; T t = 0; // time T totalIter = 0; dim3 dimGrid(params.blocks); dim3 dimBlock(params.blockSize); cout << "------------------" << endl; cout << "--- 3D CG TNS1 ---" << endl; cout << "------------------" << endl; cpuInit(params.blocks, params.Nx, params.Ny, params.Nz); //cpuInitDeflation(params.Nx, params.Ny, params.NxZ, params.nDV); readGeometry(hm, params.Nx, params.Ny); // materials readCoords(xc, dx, "xCoords.txt"); readCoords(yc, dy, "yCoords.txt"); readCoords(zc, dz, "zCoords.txt"); readBC(tHF, params.steps); initX(hT, params.Nx, params.Ny, params.Nz); initA(params.dt, params.Nx, params.Ny, params.Nz); //initAZ(params.Nx, params.Ny, params.nRowsZ); //initE(params.Nx, params.Ny, params.nRowsZ, params.NxZ); //spChol(params.NxZ, params.nDV); //check_varZ(ecc, ess, params.nDV, params.NxZ); //saveData<int>(hm, "materials3D32x32x128", params.Nx, params.Ny, params.Nz); //saveDataInTime(hT, t, "temperature32x32x128", params.Nx, params.Ny, params.Nz); //check_var(dy, params.Nx, params.Ny); // CUDA cudaInit(hT, hV, hcc, hff, hss, hww, hqB, params.blocks, params.Nx, params.Ny, params.Nz); //cudaInitDeflation(azc, azw, azs, ecc, eww, ess, params.NxZ, params.nDV, params.Nx, params.Ny); hipLaunchKernelGGL(( makeTNS2), dim3(1024),dim3(128), 0, 0, kc,kff,kfs,kfw,kf,kfe,kfn,kss,ksw,ks,kse,kww,kw,dcc,dff,dss,dww, params.Nx, params.Ny, params.Nz, params.Nx*params.Ny*params.Nz, params.Nx*params.Ny); hipEvent_t startT, stopT; float elapsedTime; hipEventCreate(&startT); hipEventCreate(&stopT); hipEventRecord(startT,0); for (int miter=0; miter<params.steps; miter++) { hipMemcpy(dr, dT, sizeof(T)*params.size, hipMemcpyDeviceToDevice); // r = rhs // 128x32x32 ~ grid dimensions hipLaunchKernelGGL(( 
elementWiseMul), dim3(128),dim3(1024), 0, 0, dr, dV, params.Nx, params.Ny); // r = V*r // 32x32 ~ grid dimensions hipLaunchKernelGGL(( addNeumannBC), dim3(1),dim3(1024), 0, 0, dr, dqB, tHF[miter], params.Nx, params.Ny); // time dependent Neumann boundary here ... r = r + NeumannBC (dqB) hipLaunchKernelGGL(( SpMVv1), dim3(128),dim3(1024), 0, 0, dq, dcc, dff, dss, dww, dT, params.Nx, params.Ny); // q = Ax (version 1) hipLaunchKernelGGL(( AXPY), dim3(128),dim3(1024), 0, 0, dr, dq, (T)-1., (T)1., params.Nx, params.Ny); // r = r - q /*#ifdef DEFL // --- (DEFLATION) r = Pr --- ///hipMemset(dyZ,0,sizeof(T)*(params.nDV+2*params.NxZ)); // reset; ZTransXYDeflation<<<4,64>>>(drZ, dr, params.nRowsZ, params.NxZ, params.Nx); // y1 = Z'*y // E*y2 = y1 (begin) hipMemcpy(hrZ, drZ, (params.nDV+2*params.NxZ)*sizeof(double), hipMemcpyDeviceToHost); // copy drZ to hrZ solve(params.NxZ,params.nDV); hipMemcpy(dyZ, hyZ, (params.nDV+2*params.NxZ)*sizeof(double), hipMemcpyHostToDevice); //copy hyZ to dyZ ///printX(hyZ, params.NxZ,params.nDV); // E*y2 = y1 (end) YMinusAzXYDeflation<<<32,128>>>(dr,dyZ,dazc,dazw,dazs,params.nRowsZ,params.NxZ,params.Nx); // r = P*r #endif */ hipLaunchKernelGGL(( SpMVv2), dim3(128),dim3(1024), 0, 0, ds,kc,kff,kfs,kfw,kf,kfe,kfn,kss,ksw,ks,kse,kww,kw,dr, params.Nx,params.Ny, params.Nx*params.Ny*params.Nz,params.Nx*params.Ny); // z = M^(-1)r (version 2) hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(dimGrid),dim3(dimBlock),params.blockSize*sizeof(T), 0, drh, dr, ds, params.Nx, params.Ny, params.Nz); hipMemcpy(hrh, drh, params.blocks*sizeof(T), hipMemcpyDeviceToHost); rhNew = dot(hrh,params.blocks); //stop = rhNew * params.maxres * params.maxres; // --- stop criterion here --- hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(dimGrid),dim3(dimBlock),params.blockSize*sizeof(T), 0, drh, ds, ds, params.Nx, params.Ny, params.Nz); hipMemcpy(hrh, drh, params.blocks*sizeof(T), hipMemcpyDeviceToHost); stop = dot(hrh,params.blocks) * params.maxres * params.maxres; iter = 0; //cout << "stop:" << stop << ", residual: " << rhNew << endl; while (abs(rhNew) > stop && iter < params.maxiter) { //while (iter < 10) { // only testing iter++; totalIter++; //cout << "iteration:" << iter << ", residual: " << abs(rhNew) << endl; if (iter==1) { hipMemcpy(dp, ds, sizeof(T)*params.size,hipMemcpyDeviceToDevice); } else { bt = rhNew/rhOld; hipLaunchKernelGGL(( AXPY), dim3(128),dim3(1024), 0, 0, dp, ds, (T)1., bt, params.Nx, params.Ny); // p = z + beta*p } hipLaunchKernelGGL(( SpMVv1), dim3(128),dim3(1024), 0, 0, dq, dcc, dff, dss, dww, dp, params.Nx, params.Ny); // q = Ap (version 1) /*#ifdef DEFL // --- (DEFLATION) q = Pq --- ZTransXYDeflation<<<4,64>>>(drZ, dq, params.nRowsZ, params.NxZ, params.Nx); // y1 = Z'*y // E*y2 = y1 (begin) hipMemcpy(hrZ, drZ, (params.nDV+2*params.NxZ)*sizeof(T), hipMemcpyDeviceToHost); // copy drZ to hrZ solve(params.NxZ,params.nDV); hipMemcpy(dyZ, hyZ, (params.nDV+2*params.NxZ)*sizeof(T), hipMemcpyHostToDevice); //copy hyZ to dyZ // E*y2 = y1 (end) YMinusAzXYDeflation<<<32,128>>>(dq,dyZ,dazc,dazw,dazs,params.nRowsZ,params.NxZ,params.Nx); // q = Pq #endif*/ hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(dimGrid),dim3(dimBlock),params.blockSize*sizeof(T), 0, dsg, dp, dq, params.Nx, params.Ny, params.Nz); // sigma = <p,q> hipMemcpy(hsg, dsg, params.blocks*sizeof(T), hipMemcpyDeviceToHost); sg = dot(hsg,params.blocks); ap = rhNew/sg; // alpha = rhoNew / sigma hipLaunchKernelGGL(( AXPY), dim3(128),dim3(1024), 0, 0, dr, dq, -ap, (T)1., params.Nx, params.Ny); // r = r - alpha*q hipLaunchKernelGGL(( AXPY), 
dim3(128),dim3(1024), 0, 0, dT, dp, ap, (T)1., params.Nx, params.Ny); // x = x + alpha*p hipLaunchKernelGGL(( SpMVv2), dim3(128),dim3(1024), 0, 0, ds,kc,kff,kfs,kfw,kf,kfe,kfn,kss,ksw,ks,kse,kww,kw,dr, params.Nx,params.Ny, params.Nx*params.Ny*params.Nz,params.Nx*params.Ny); // z = M^(-1)r (version 2) rhOld = rhNew; hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(dimGrid),dim3(dimBlock),params.blockSize*sizeof(T), 0, drh, dr, ds, params.Nx, params.Ny, params.Nz); // rhoNew = <r,z> hipMemcpy(hrh, drh, params.blocks*sizeof(T), hipMemcpyDeviceToHost); rhNew = dot(hrh,params.blocks); } // x = x + (deflation) t += params.dt; //cout << "t= " << setprecision(3) << fixed << t << " s ,tmstp:" << miter << " ,it:" << iter << endl; //if ((miter+1)%4000==0) //{ //hipMemcpy(hT, dT, sizeof(T)*params.size, hipMemcpyDeviceToHost); //saveDataInTime(hT, t, "temperature32x32x128", params.Nx, params.Ny, params.Nz); //} } hipEventRecord(stopT,0); hipEventSynchronize(stopT); hipEventElapsedTime(&elapsedTime, startT, stopT); cout<< "ellapsed time (cuda): " << elapsedTime << " miliseconds" << endl; cout << "Simulation finished." << endl; cout << "total number of iterations: " << totalIter << endl; hipEventDestroy(startT); hipEventDestroy(stopT); //cudaFinalizeDeflation(); cudaFinalize(); //cpuFinalizeDeflation(); cpuFinalize(); return 0; }
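// --------------------------------------------------------------------------
// Hedged sketch (editorial, not part of the original file): the while-loop in
// main() is a preconditioned conjugate-gradient iteration. SpMVv1 plays the
// role of q = A*p, SpMVv2 applies the preconditioner assembled by makeTNS2
// (z = M^{-1} r), DOTGPU forms the inner products, and AXPY does the vector
// updates. A host-side reference of the same recurrence, with hypothetical
// apply_A / apply_Minv callbacks, is:
#include <vector>
#include <cmath>
#include <cstddef>
#include <functional>

using Vec = std::vector<double>;

static double dot_ref(const Vec& a, const Vec& b) {
    double s = 0.0;
    for (std::size_t i = 0; i < a.size(); ++i) s += a[i] * b[i];
    return s;
}

static void pcg_ref(const std::function<void(const Vec&, Vec&)>& apply_A,
                    const std::function<void(const Vec&, Vec&)>& apply_Minv,
                    const Vec& rhs, Vec& x, double tol, int maxiter)
{
    const std::size_t n = rhs.size();
    Vec r(n), z(n), p(n), q(n);
    apply_A(x, q);
    for (std::size_t i = 0; i < n; ++i) r[i] = rhs[i] - q[i];   // r = b - A*x
    apply_Minv(r, z);                                           // z = M^{-1} r
    double rho = dot_ref(r, z);
    const double stop = dot_ref(z, z) * tol * tol;              // same criterion as the GPU code
    p = z;
    for (int it = 0; it < maxiter && std::fabs(rho) > stop; ++it) {
        apply_A(p, q);                                          // q = A*p
        const double alpha = rho / dot_ref(p, q);
        for (std::size_t i = 0; i < n; ++i) { x[i] += alpha * p[i]; r[i] -= alpha * q[i]; }
        apply_Minv(r, z);                                       // z = M^{-1} r
        const double rho_new = dot_ref(r, z);
        const double beta = rho_new / rho;
        for (std::size_t i = 0; i < n; ++i) p[i] = z[i] + beta * p[i];
        rho = rho_new;
    }
}
// --------------------------------------------------------------------------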
ab9c41f31e9be1277b3002da28c0bf547a31e9df.cu
#include <algorithm> #include <iostream> #include <fstream> #include <sstream> #include <cstring> #include <iomanip> #include <numeric> #include <stdlib.h> #include <math.h> #define min(a,b) a<b ? a : b #define max(a,b) a>b ? a : b #define DEFL using namespace std; typedef float T; typedef struct { unsigned int blocks; unsigned int blockSize; unsigned int blocksZ; unsigned int Nx; unsigned int Ny; unsigned int Nz; unsigned int size; unsigned int steps; unsigned int maxiter; T dt; T maxres; unsigned int nRowsZ; unsigned int nDV; unsigned int NxZ; } Parameters; typedef struct { T rho; T cp; T k; } MaterialProperties; Parameters params; MaterialProperties steel; MaterialProperties Ag; MaterialProperties MgO; MaterialProperties inconel; MaterialProperties NiCr; #include "cpuFunctions.h" //#include "cpuFunctionsDeflation.h" #include "cudaFunctions.h" //#include "cudaFunctionsDeflation.h" int main(void) { // Parameters params.blocks = 256; params.blockSize = 128; params.Nx = 32; params.Ny = 32; params.Nz = 128; params.size = params.Nx*params.Ny*params.Nz + 2*params.Nx*params.Ny; params.steps = 1; params.maxiter = 1000000; params.dt = 1./300.; // 0.0033333333333333; params.maxres = 1e-4; params.nRowsZ = 4; // number of rows/columns for one deflation vector params.nDV = (params.Nx*params.Ny) / (params.nRowsZ*params.nRowsZ); params.NxZ = params.Nx/params.nRowsZ; // number of course cells in a row // steel steel.rho = 7610.0; // 7700 steel.cp = 545.0; // 560 steel.k = 21.0; // Ag Ag.rho = 8957.0; Ag.cp = 362.0; // 368 Ag.k = 120; // 111.5 // MgO MgO.rho = 3150.0; MgO.cp = 1110.0; // 1140 MgO.k = 11.5; // 10 // inconel inconel.rho = 8470.0; inconel.cp = 500.0; // 520 inconel.k = 20.5; // NiCr NiCr.rho = 8200.0; NiCr.cp = 528.0; NiCr.k = 24.5; T t = 0; // time T totalIter = 0; dim3 dimGrid(params.blocks); dim3 dimBlock(params.blockSize); cout << "------------------" << endl; cout << "--- 3D CG TNS1 ---" << endl; cout << "------------------" << endl; cpuInit(params.blocks, params.Nx, params.Ny, params.Nz); //cpuInitDeflation(params.Nx, params.Ny, params.NxZ, params.nDV); readGeometry(hm, params.Nx, params.Ny); // materials readCoords(xc, dx, "xCoords.txt"); readCoords(yc, dy, "yCoords.txt"); readCoords(zc, dz, "zCoords.txt"); readBC(tHF, params.steps); initX(hT, params.Nx, params.Ny, params.Nz); initA(params.dt, params.Nx, params.Ny, params.Nz); //initAZ(params.Nx, params.Ny, params.nRowsZ); //initE(params.Nx, params.Ny, params.nRowsZ, params.NxZ); //spChol(params.NxZ, params.nDV); //check_varZ(ecc, ess, params.nDV, params.NxZ); //saveData<int>(hm, "materials3D32x32x128", params.Nx, params.Ny, params.Nz); //saveDataInTime(hT, t, "temperature32x32x128", params.Nx, params.Ny, params.Nz); //check_var(dy, params.Nx, params.Ny); // CUDA cudaInit(hT, hV, hcc, hff, hss, hww, hqB, params.blocks, params.Nx, params.Ny, params.Nz); //cudaInitDeflation(azc, azw, azs, ecc, eww, ess, params.NxZ, params.nDV, params.Nx, params.Ny); makeTNS2<<<1024,128>>>(kc,kff,kfs,kfw,kf,kfe,kfn,kss,ksw,ks,kse,kww,kw,dcc,dff,dss,dww, params.Nx, params.Ny, params.Nz, params.Nx*params.Ny*params.Nz, params.Nx*params.Ny); cudaEvent_t startT, stopT; float elapsedTime; cudaEventCreate(&startT); cudaEventCreate(&stopT); cudaEventRecord(startT,0); for (int miter=0; miter<params.steps; miter++) { cudaMemcpy(dr, dT, sizeof(T)*params.size, cudaMemcpyDeviceToDevice); // r = rhs // 128x32x32 ~ grid dimensions elementWiseMul<<<128,1024>>>(dr, dV, params.Nx, params.Ny); // r = V*r // 32x32 ~ grid dimensions addNeumannBC<<<1,1024>>>(dr, dqB, 
tHF[miter], params.Nx, params.Ny); // time dependent Neumann boundary here ... r = r + NeumannBC (dqB) SpMVv1<<<128,1024>>>(dq, dcc, dff, dss, dww, dT, params.Nx, params.Ny); // q = Ax (version 1) AXPY<<<128,1024>>>(dr, dq, (T)-1., (T)1., params.Nx, params.Ny); // r = r - q /*#ifdef DEFL // --- (DEFLATION) r = Pr --- ///cudaMemset(dyZ,0,sizeof(T)*(params.nDV+2*params.NxZ)); // reset; ZTransXYDeflation<<<4,64>>>(drZ, dr, params.nRowsZ, params.NxZ, params.Nx); // y1 = Z'*y // E*y2 = y1 (begin) cudaMemcpy(hrZ, drZ, (params.nDV+2*params.NxZ)*sizeof(double), cudaMemcpyDeviceToHost); // copy drZ to hrZ solve(params.NxZ,params.nDV); cudaMemcpy(dyZ, hyZ, (params.nDV+2*params.NxZ)*sizeof(double), cudaMemcpyHostToDevice); //copy hyZ to dyZ ///printX(hyZ, params.NxZ,params.nDV); // E*y2 = y1 (end) YMinusAzXYDeflation<<<32,128>>>(dr,dyZ,dazc,dazw,dazs,params.nRowsZ,params.NxZ,params.Nx); // r = P*r #endif */ SpMVv2<<<128,1024>>>(ds,kc,kff,kfs,kfw,kf,kfe,kfn,kss,ksw,ks,kse,kww,kw,dr, params.Nx,params.Ny, params.Nx*params.Ny*params.Nz,params.Nx*params.Ny); // z = M^(-1)r (version 2) DOTGPU<T,128><<<dimGrid,dimBlock,params.blockSize*sizeof(T)>>>(drh, dr, ds, params.Nx, params.Ny, params.Nz); cudaMemcpy(hrh, drh, params.blocks*sizeof(T), cudaMemcpyDeviceToHost); rhNew = dot(hrh,params.blocks); //stop = rhNew * params.maxres * params.maxres; // --- stop criterion here --- DOTGPU<T,128><<<dimGrid,dimBlock,params.blockSize*sizeof(T)>>>(drh, ds, ds, params.Nx, params.Ny, params.Nz); cudaMemcpy(hrh, drh, params.blocks*sizeof(T), cudaMemcpyDeviceToHost); stop = dot(hrh,params.blocks) * params.maxres * params.maxres; iter = 0; //cout << "stop:" << stop << ", residual: " << rhNew << endl; while (abs(rhNew) > stop && iter < params.maxiter) { //while (iter < 10) { // only testing iter++; totalIter++; //cout << "iteration:" << iter << ", residual: " << abs(rhNew) << endl; if (iter==1) { cudaMemcpy(dp, ds, sizeof(T)*params.size,cudaMemcpyDeviceToDevice); } else { bt = rhNew/rhOld; AXPY<<<128,1024>>>(dp, ds, (T)1., bt, params.Nx, params.Ny); // p = z + beta*p } SpMVv1<<<128,1024>>>(dq, dcc, dff, dss, dww, dp, params.Nx, params.Ny); // q = Ap (version 1) /*#ifdef DEFL // --- (DEFLATION) q = Pq --- ZTransXYDeflation<<<4,64>>>(drZ, dq, params.nRowsZ, params.NxZ, params.Nx); // y1 = Z'*y // E*y2 = y1 (begin) cudaMemcpy(hrZ, drZ, (params.nDV+2*params.NxZ)*sizeof(T), cudaMemcpyDeviceToHost); // copy drZ to hrZ solve(params.NxZ,params.nDV); cudaMemcpy(dyZ, hyZ, (params.nDV+2*params.NxZ)*sizeof(T), cudaMemcpyHostToDevice); //copy hyZ to dyZ // E*y2 = y1 (end) YMinusAzXYDeflation<<<32,128>>>(dq,dyZ,dazc,dazw,dazs,params.nRowsZ,params.NxZ,params.Nx); // q = Pq #endif*/ DOTGPU<T,128><<<dimGrid,dimBlock,params.blockSize*sizeof(T)>>>(dsg, dp, dq, params.Nx, params.Ny, params.Nz); // sigma = <p,q> cudaMemcpy(hsg, dsg, params.blocks*sizeof(T), cudaMemcpyDeviceToHost); sg = dot(hsg,params.blocks); ap = rhNew/sg; // alpha = rhoNew / sigma AXPY<<<128,1024>>>(dr, dq, -ap, (T)1., params.Nx, params.Ny); // r = r - alpha*q AXPY<<<128,1024>>>(dT, dp, ap, (T)1., params.Nx, params.Ny); // x = x + alpha*p SpMVv2<<<128,1024>>>(ds,kc,kff,kfs,kfw,kf,kfe,kfn,kss,ksw,ks,kse,kww,kw,dr, params.Nx,params.Ny, params.Nx*params.Ny*params.Nz,params.Nx*params.Ny); // z = M^(-1)r (version 2) rhOld = rhNew; DOTGPU<T,128><<<dimGrid,dimBlock,params.blockSize*sizeof(T)>>>(drh, dr, ds, params.Nx, params.Ny, params.Nz); // rhoNew = <r,z> cudaMemcpy(hrh, drh, params.blocks*sizeof(T), cudaMemcpyDeviceToHost); rhNew = dot(hrh,params.blocks); } // x = x + (deflation) 
t += params.dt; //cout << "t= " << setprecision(3) << fixed << t << " s ,tmstp:" << miter << " ,it:" << iter << endl; //if ((miter+1)%4000==0) //{ //cudaMemcpy(hT, dT, sizeof(T)*params.size, cudaMemcpyDeviceToHost); //saveDataInTime(hT, t, "temperature32x32x128", params.Nx, params.Ny, params.Nz); //} } cudaEventRecord(stopT,0); cudaEventSynchronize(stopT); cudaEventElapsedTime(&elapsedTime, startT, stopT); cout<< "ellapsed time (cuda): " << elapsedTime << " miliseconds" << endl; cout << "Simulation finished." << endl; cout << "total number of iterations: " << totalIter << endl; cudaEventDestroy(startT); cudaEventDestroy(stopT); //cudaFinalizeDeflation(); cudaFinalize(); //cpuFinalizeDeflation(); cpuFinalize(); return 0; }
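// --------------------------------------------------------------------------
// Hedged sketch (editorial, not part of the original file): the time-step loop
// above is timed with CUDA events, so elapsedTime reflects when the GPU
// actually finished the work enqueued between the two cudaEventRecord calls,
// not merely when the host finished enqueueing it. The pattern in isolation:
#include <cuda_runtime.h>

static float time_region_ms()
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    // ... enqueue the kernels / copies to be timed ...
    cudaEventRecord(stop, 0);

    cudaEventSynchronize(stop);              // block until 'stop' has completed
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);  // milliseconds between the two events

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}
// --------------------------------------------------------------------------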
b8ab260ac59c8084b2da0aed4529cac71ed776b3.hip
// !!! This is a file automatically generated by hipify!!! /** * A test program to evaluate the gpu collective communications */ #include <stdio.h> #include <string.h> #include <hip/hip_runtime.h> #include <mpi.h> #include <omp.h> #include "collectives/collectives.h" inline unsigned long long rdtsc() { unsigned a, d; __asm__ volatile("rdtsc" : "=a" (a), "=d" (d)); return ((unsigned long long)a) | (((unsigned long long)d) << 32); } __global__ void square(double *data, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { data[idx] = data[idx] * data[idx]; } } int main(int argc, char *argv[]) { MPI_Init(&argc, &argv); cudaMPI_Init(MPI_COMM_WORLD); const int N = atoi(argv[1]); CUDAMPI_START_DEVICE_THREADS // get my rank int rank; cudaMPI_Comm_rank(&rank); // allocate some space on my device double *d_data; CUDACHECK( hipMalloc(&d_data, N * sizeof(double)) ); CUDACHECK( hipMemset(d_data, 0, N * sizeof(double)) ); // allocate some space on host for error checking double *h_data; CUDACHECK( hipHostMalloc(&h_data, N * sizeof(double)) ); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // if I am rank 0, generate data for everyone if (rank == 0) { int cuda_size; cudaMPI_Comm_size(MPI_COMM_WORLD, &cuda_size); int sendcount = N * cuda_size; double *d_sendbuf; double *h_sendbuf; CUDACHECK( hipMalloc(&d_sendbuf, sendcount * sizeof(double)) ); CUDACHECK( hipHostMalloc(&h_sendbuf, sendcount * sizeof(double)) ); int i; for (i = 0; i < sendcount; ++i) { h_sendbuf[i] = i; } CUDACHECK( hipMemcpy(d_sendbuf, h_sendbuf, sendcount * sizeof(double), hipMemcpyDefault) ); CUDACHECK( hipEventRecord(start) ); cudaMPI_Scatter(d_sendbuf, N, d_data, N, 0, MPI_COMM_WORLD); CUDACHECK( hipEventRecord(stop) ); CUDACHECK( hipHostFree(h_sendbuf) ); CUDACHECK( hipFree(d_sendbuf) ); } else { CUDACHECK( hipEventRecord(start) ); cudaMPI_Scatter(NULL, 0, d_data, N, 0, MPI_COMM_WORLD); CUDACHECK( hipEventRecord(stop) ); } float time; CUDACHECK( hipDeviceSynchronize() ); CUDACHECK( hipEventElapsedTime(&time, start, stop) ); printf("rank %d took %f ms for the scatter\n", rank, time); // run a kernel on said data dim3 gridDim(128); dim3 blockDim(((N - 1) / gridDim.x) + 1); hipLaunchKernelGGL(( square), dim3(gridDim), dim3(blockDim), 0, 0, d_data, N); // do a print to show the results CUDACHECK( hipMemcpy(h_data, d_data, N * sizeof(double), hipMemcpyDefault) ); printf("rank %d results\n", rank); int i; for (i = 0; i < N; ++i) { printf("%lf, ", h_data[i]); } printf("\n"); CUDACHECK( hipFree(d_data) ); CUDACHECK( hipHostFree(h_data) ); CUDAMPI_STOP_DEVICE_THREADS cudaMPI_Finalize(); MPI_Finalize(); }
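// --------------------------------------------------------------------------
// Editorial note (hedged): hipHostMalloc / cudaMallocHost in this test return
// page-locked ("pinned") host memory rather than ordinary pageable memory.
// Pinned buffers are what allow device-to-host and host-to-device copies to
// run asynchronously and at higher bandwidth, and they must be released with
// hipHostFree / cudaFreeHost (as done above), not with free(). The hipify
// mapping for this pair is cudaMallocHost -> hipHostMalloc and
// cudaFreeHost -> hipHostFree.
// --------------------------------------------------------------------------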
b8ab260ac59c8084b2da0aed4529cac71ed776b3.cu
/** * A test program to evaluate the gpu collective communications */ #include <stdio.h> #include <string.h> #include <cuda.h> #include <mpi.h> #include <omp.h> #include "collectives/collectives.h" inline unsigned long long rdtsc() { unsigned a, d; __asm__ volatile("rdtsc" : "=a" (a), "=d" (d)); return ((unsigned long long)a) | (((unsigned long long)d) << 32); } __global__ void square(double *data, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { data[idx] = data[idx] * data[idx]; } } int main(int argc, char *argv[]) { MPI_Init(&argc, &argv); cudaMPI_Init(MPI_COMM_WORLD); const int N = atoi(argv[1]); CUDAMPI_START_DEVICE_THREADS // get my rank int rank; cudaMPI_Comm_rank(&rank); // allocate some space on my device double *d_data; CUDACHECK( cudaMalloc(&d_data, N * sizeof(double)) ); CUDACHECK( cudaMemset(d_data, 0, N * sizeof(double)) ); // allocate some space on host for error checking double *h_data; CUDACHECK( cudaMallocHost(&h_data, N * sizeof(double)) ); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // if I am rank 0, generate data for everyone if (rank == 0) { int cuda_size; cudaMPI_Comm_size(MPI_COMM_WORLD, &cuda_size); int sendcount = N * cuda_size; double *d_sendbuf; double *h_sendbuf; CUDACHECK( cudaMalloc(&d_sendbuf, sendcount * sizeof(double)) ); CUDACHECK( cudaMallocHost(&h_sendbuf, sendcount * sizeof(double)) ); int i; for (i = 0; i < sendcount; ++i) { h_sendbuf[i] = i; } CUDACHECK( cudaMemcpy(d_sendbuf, h_sendbuf, sendcount * sizeof(double), cudaMemcpyDefault) ); CUDACHECK( cudaEventRecord(start) ); cudaMPI_Scatter(d_sendbuf, N, d_data, N, 0, MPI_COMM_WORLD); CUDACHECK( cudaEventRecord(stop) ); CUDACHECK( cudaFreeHost(h_sendbuf) ); CUDACHECK( cudaFree(d_sendbuf) ); } else { CUDACHECK( cudaEventRecord(start) ); cudaMPI_Scatter(NULL, 0, d_data, N, 0, MPI_COMM_WORLD); CUDACHECK( cudaEventRecord(stop) ); } float time; CUDACHECK( cudaDeviceSynchronize() ); CUDACHECK( cudaEventElapsedTime(&time, start, stop) ); printf("rank %d took %f ms for the scatter\n", rank, time); // run a kernel on said data dim3 gridDim(128); dim3 blockDim(((N - 1) / gridDim.x) + 1); square<<<gridDim, blockDim>>>(d_data, N); // do a print to show the results CUDACHECK( cudaMemcpy(h_data, d_data, N * sizeof(double), cudaMemcpyDefault) ); printf("rank %d results\n", rank); int i; for (i = 0; i < N; ++i) { printf("%lf, ", h_data[i]); } printf("\n"); CUDACHECK( cudaFree(d_data) ); CUDACHECK( cudaFreeHost(h_data) ); CUDAMPI_STOP_DEVICE_THREADS cudaMPI_Finalize(); MPI_Finalize(); }
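One detail worth noting in both copies of this test: the launch of square fixes gridDim at 128 and derives blockDim as ceil(N/128), so the derived block size would exceed the 1024-threads-per-block limit once N grows past 128 * 1024. The more common sizing fixes the block size and derives the grid size instead. The sketch below shows that pattern; it reuses the test's square kernel, and every other name in it is illustrative.

// Illustrative launch-sizing sketch (not part of the test): fix the block size
// and derive the grid size, so any N stays within the 1024-threads-per-block limit.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void square(double* data, int N) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) data[idx] = data[idx] * data[idx];
}

int main() {
    const int N = 1 << 20;
    double* d_data = nullptr;
    cudaMalloc(&d_data, N * sizeof(double));
    cudaMemset(d_data, 0, N * sizeof(double));

    const int threadsPerBlock = 256;
    const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;  // ceil(N / 256)
    square<<<blocks, threadsPerBlock>>>(d_data, N);
    cudaDeviceSynchronize();
    std::printf("launched %d blocks of %d threads\n", blocks, threadsPerBlock);

    cudaFree(d_data);
    return 0;
}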
0cb8da96e0b54c48c96c59bc9385fdbd77a39d84.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/gpu_util.cuh" #include "caffe/layers/st_layer.hpp" #include "caffe/util/benchmark.hpp" namespace caffe { template <typename Dtype> __global__ void set_value_to_constant(const int nthreads, Dtype value, int size, int i, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { dst[index * size + i] = value; } } template <typename Dtype> __global__ void copy_values(const int nthreads, int size_src, int k, const Dtype* src, int size_dst, int i, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { dst[index * size_dst + i] = src[index * size_src + k]; } } template <typename Dtype> __global__ void SpatialTransformerForwardGPU(const int nthreads, int N, int C, int output_H_, int output_W_, int H, int W, const Dtype* input_grid_data, const Dtype* U, Dtype* V) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i; const int row_idx = output_W_ * s + t; const Dtype px = coordinates[row_idx * 2]; const Dtype py = coordinates[row_idx * 2 + 1]; const int V_offset = index; V[V_offset] = (Dtype)0.; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; int m, n; Dtype w; const Dtype* pic = U + i * (C * H * W) + j * (H * W); m = floor(x); n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (y - n)); V[V_offset] += w * pic[m * W + n]; } m = floor(x) + 1; n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (y - n)); V[V_offset] += w * pic[m * W + n]; } m = floor(x); n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (n - y)); V[V_offset] += w * pic[m * W + n]; } m = floor(x) + 1; n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (n - y)); V[V_offset] += w * pic[m * W + n]; } } } template <typename Dtype> void SpatialTransformerLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { string prefix = "SpatialTransformerLayer::Forward_gpu::\t"; const Dtype* U = bottom[0]->gpu_data(); const Dtype* theta = bottom[1]->gpu_data(); const Dtype* output_grid_data = output_grid.gpu_data(); Dtype* full_theta_data = full_theta.mutable_gpu_data(); Dtype* input_grid_data = input_grid.mutable_gpu_data(); Dtype* V = top[0]->mutable_gpu_data(); //std::cout << "stn caffe gpu set" <<std::endl; caffe_gpu_set(input_grid.count(), (Dtype)0, input_grid_data); caffe_gpu_set(top[0]->count(), (Dtype)0, V); // compute full_theta // std::cout << "computing full_theta" <<std::endl; int k = 0; const int num_threads = N; for(int i=0; i<6; ++i) { if(is_pre_defined_theta[i]) { hipLaunchKernelGGL(( set_value_to_constant<Dtype>), dim3(CAFFE_GET_BLOCKS(num_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_threads, pre_defined_theta[i], 6, i, full_theta_data); //std::cout << "Setting value " << pre_defined_theta[i] << " to "<< i << // "/6 of full_theta_data" << std::endl; } else { hipLaunchKernelGGL(( copy_values<Dtype>), dim3(CAFFE_GET_BLOCKS(num_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_threads, 6 - pre_defined_count, k, theta, 6, i, full_theta_data); //std::cout << "Copying " << k << "/" << 6 - 
pre_defined_count << " of theta to " // << i << "/6 of full_theta_data" << std::endl; ++ k; } } // compute out input_grid_data for(int i = 0; i < N; ++i) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, output_H_ * output_W_, 2, 3, (Dtype)1., output_grid_data, full_theta_data + 6 * i, (Dtype)0., input_grid_data + (output_H_ * output_W_ * 2) * i); } const int nthreads = N * C * output_H_ * output_W_; hipLaunchKernelGGL(( SpatialTransformerForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N, C, output_H_, output_W_, H, W, input_grid_data, U, V); } template <typename Dtype> __global__ void SpatialTransformerBackwardGPU_dTheta(const int nthreads, int C, int output_H_, int output_W_, int H, int W, const Dtype* input_grid_data, const Dtype* dV_array, const Dtype* U_array, Dtype* dTheta_tmp_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i; const int row_idx = output_W_ * s + t; const Dtype px = coordinates[row_idx * 2]; const Dtype py = coordinates[row_idx * 2 + 1]; Dtype delta_dpx = (Dtype)0.; Dtype delta_dpy = (Dtype)0.; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; const int dV_offset = index; const Dtype dV = dV_array[dV_offset]; int m, n; const Dtype* U = U_array + i * (C * H * W) + j * (H * W); // left-bottom neighbor m = floor(x); n = floor(y); if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx -= (1 - (y - n)) * U[m * W + n] * dV * H / 2; delta_dpy -= (1 - (x - m)) * U[m * W + n] * dV * W / 2; } // left-top neighbor m = floor(x); n = floor(y) + 1; if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx -= (1 - (n - y)) * U[m * W + n] * dV * H / 2; delta_dpy += (1 - (x - m)) * U[m * W + n] * dV * W / 2; } // right-bottom neighbor m = floor(x) + 1; n = floor(y); if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx += (1 - (y - n)) * U[m * W + n] * dV * H / 2; delta_dpy -= (1 - (m - x)) * U[m * W + n] * dV * W / 2; } // right-top neighbor m = floor(x) + 1; n = floor(y) + 1; if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx += (1 - (n - y)) * U[m * W + n] * dV * H / 2; delta_dpy += (1 - (m - x)) * U[m * W + n] * dV * W / 2; } int idx = j * (output_H_ * output_W_) + s * output_W_ + t; dTheta_tmp_diff[(6 * i) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (s * 1.0 / output_H_ * 2 - 1); dTheta_tmp_diff[(6 * i + 1) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (t * 1.0 / output_W_ * 2 - 1); dTheta_tmp_diff[(6 * i + 2) * (output_H_ * output_W_ * C) + idx] += delta_dpx; dTheta_tmp_diff[(6 * i + 3) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (s * 1.0 / output_H_ * 2 - 1); dTheta_tmp_diff[(6 * i + 4) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (t * 1.0 / output_W_ * 2 - 1); dTheta_tmp_diff[(6 * i + 5) * (output_H_ * output_W_ * C) + idx] += delta_dpy; } } template <typename Dtype> __global__ void SpatialTransformerBackwardGPU_dU(const int nthreads, const int C, const int W, const int H, const int output_H_, const int output_W_, const Dtype* input_grid_data, const Dtype* dV, Dtype* dU) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + 
(output_H_ * output_W_ * 2) * i; const int row_idx = output_W_ * s + t; const Dtype px = coordinates[row_idx * 2]; const Dtype py = coordinates[row_idx * 2 + 1]; const int V_offset = index; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; int m, n; Dtype w; Dtype* pic = dU + i * (C * H * W) + j * (H * W); m = floor(x); n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (y - n)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x) + 1; n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (y - n)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x); n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (n - y)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x) + 1; n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (n - y)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } } } template <typename Dtype> void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { string prefix = "SpatialTransformerLayer::Backward_GPU::\t"; const Dtype* dV = top[0]->gpu_diff(); const Dtype* input_grid_data = input_grid.gpu_data(); const Dtype* U = bottom[0]->gpu_data(); Dtype* dFull_theta = full_theta.mutable_gpu_diff(); Dtype* dTheta = bottom[1]->mutable_gpu_diff(); Dtype* dTheta_tmp_diff = dTheta_tmp.mutable_gpu_diff(); caffe_gpu_set(dTheta_tmp.count(), (Dtype)0., dTheta_tmp_diff); const int nthreads = N * C * output_H_ * output_W_; hipLaunchKernelGGL(( SpatialTransformerBackwardGPU_dTheta<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, C, output_H_, output_W_, H, W, input_grid_data, dV, U, dTheta_tmp_diff); Dtype* all_ones_2_data = all_ones_2.mutable_gpu_data(); caffe_gpu_set(all_ones_2.count(), (Dtype)1., all_ones_2_data); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, full_theta.count(), 1, output_H_ * output_W_ * C, (Dtype)1., dTheta_tmp_diff, all_ones_2_data, (Dtype)0., dFull_theta); /*const Dtype* db_dFull_theta = full_theta.cpu_diff(); for(int i=0; i<full_theta.count(); ++i) { std::cout << db_dFull_theta[i] << " "; } std::cout<<std::endl;*/ int k = 0; const int num_threads = N; for(int i=0; i<6; ++i) { if(!is_pre_defined_theta[i]) { hipLaunchKernelGGL(( copy_values<Dtype>), dim3(CAFFE_GET_BLOCKS(num_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_threads, 6, i, dFull_theta, 6 - pre_defined_count, k, dTheta); //std::cout << "Copying " << i << "/6 of dFull_theta to " << k << "/" << // 6 - pre_defined_count << " of dTheta" << std::endl; ++ k; } } /*const Dtype* db_dtheta = bottom[1]->cpu_diff(); for(int i=0; i<bottom[1]->count(); ++i) { std::cout << db_dtheta[i] << " "; } std::cout<<std::endl;*/ if(to_compute_dU_) { Dtype* dU = bottom[0]->mutable_gpu_diff(); caffe_gpu_set(bottom[0]->count(), (Dtype)0., dU); const int nthreads = N * C * output_H_ * output_W_; hipLaunchKernelGGL(( SpatialTransformerBackwardGPU_dU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, C, W, H, output_H_, output_W_, input_grid_data, dV, dU); } } INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer); } // namespace caffe
0cb8da96e0b54c48c96c59bc9385fdbd77a39d84.cu
#include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/gpu_util.cuh" #include "caffe/layers/st_layer.hpp" #include "caffe/util/benchmark.hpp" namespace caffe { template <typename Dtype> __global__ void set_value_to_constant(const int nthreads, Dtype value, int size, int i, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { dst[index * size + i] = value; } } template <typename Dtype> __global__ void copy_values(const int nthreads, int size_src, int k, const Dtype* src, int size_dst, int i, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { dst[index * size_dst + i] = src[index * size_src + k]; } } template <typename Dtype> __global__ void SpatialTransformerForwardGPU(const int nthreads, int N, int C, int output_H_, int output_W_, int H, int W, const Dtype* input_grid_data, const Dtype* U, Dtype* V) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i; const int row_idx = output_W_ * s + t; const Dtype px = coordinates[row_idx * 2]; const Dtype py = coordinates[row_idx * 2 + 1]; const int V_offset = index; V[V_offset] = (Dtype)0.; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; int m, n; Dtype w; const Dtype* pic = U + i * (C * H * W) + j * (H * W); m = floor(x); n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (y - n)); V[V_offset] += w * pic[m * W + n]; } m = floor(x) + 1; n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (y - n)); V[V_offset] += w * pic[m * W + n]; } m = floor(x); n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (n - y)); V[V_offset] += w * pic[m * W + n]; } m = floor(x) + 1; n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (n - y)); V[V_offset] += w * pic[m * W + n]; } } } template <typename Dtype> void SpatialTransformerLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { string prefix = "SpatialTransformerLayer::Forward_gpu::\t"; const Dtype* U = bottom[0]->gpu_data(); const Dtype* theta = bottom[1]->gpu_data(); const Dtype* output_grid_data = output_grid.gpu_data(); Dtype* full_theta_data = full_theta.mutable_gpu_data(); Dtype* input_grid_data = input_grid.mutable_gpu_data(); Dtype* V = top[0]->mutable_gpu_data(); //std::cout << "stn caffe gpu set" <<std::endl; caffe_gpu_set(input_grid.count(), (Dtype)0, input_grid_data); caffe_gpu_set(top[0]->count(), (Dtype)0, V); // compute full_theta // std::cout << "computing full_theta" <<std::endl; int k = 0; const int num_threads = N; for(int i=0; i<6; ++i) { if(is_pre_defined_theta[i]) { set_value_to_constant<Dtype><<<CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS>>>( num_threads, pre_defined_theta[i], 6, i, full_theta_data); //std::cout << "Setting value " << pre_defined_theta[i] << " to "<< i << // "/6 of full_theta_data" << std::endl; } else { copy_values<Dtype><<<CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS>>>(num_threads, 6 - pre_defined_count, k, theta, 6, i, full_theta_data); //std::cout << "Copying " << k << "/" << 6 - pre_defined_count << " of theta to " // << i << "/6 of full_theta_data" << std::endl; ++ k; } } // compute out input_grid_data for(int i = 0; i < N; ++i) { 
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, output_H_ * output_W_, 2, 3, (Dtype)1., output_grid_data, full_theta_data + 6 * i, (Dtype)0., input_grid_data + (output_H_ * output_W_ * 2) * i); } const int nthreads = N * C * output_H_ * output_W_; SpatialTransformerForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, N, C, output_H_, output_W_, H, W, input_grid_data, U, V); } template <typename Dtype> __global__ void SpatialTransformerBackwardGPU_dTheta(const int nthreads, int C, int output_H_, int output_W_, int H, int W, const Dtype* input_grid_data, const Dtype* dV_array, const Dtype* U_array, Dtype* dTheta_tmp_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i; const int row_idx = output_W_ * s + t; const Dtype px = coordinates[row_idx * 2]; const Dtype py = coordinates[row_idx * 2 + 1]; Dtype delta_dpx = (Dtype)0.; Dtype delta_dpy = (Dtype)0.; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; const int dV_offset = index; const Dtype dV = dV_array[dV_offset]; int m, n; const Dtype* U = U_array + i * (C * H * W) + j * (H * W); // left-bottom neighbor m = floor(x); n = floor(y); if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx -= (1 - (y - n)) * U[m * W + n] * dV * H / 2; delta_dpy -= (1 - (x - m)) * U[m * W + n] * dV * W / 2; } // left-top neighbor m = floor(x); n = floor(y) + 1; if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx -= (1 - (n - y)) * U[m * W + n] * dV * H / 2; delta_dpy += (1 - (x - m)) * U[m * W + n] * dV * W / 2; } // right-bottom neighbor m = floor(x) + 1; n = floor(y); if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx += (1 - (y - n)) * U[m * W + n] * dV * H / 2; delta_dpy -= (1 - (m - x)) * U[m * W + n] * dV * W / 2; } // right-top neighbor m = floor(x) + 1; n = floor(y) + 1; if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx += (1 - (n - y)) * U[m * W + n] * dV * H / 2; delta_dpy += (1 - (m - x)) * U[m * W + n] * dV * W / 2; } int idx = j * (output_H_ * output_W_) + s * output_W_ + t; dTheta_tmp_diff[(6 * i) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (s * 1.0 / output_H_ * 2 - 1); dTheta_tmp_diff[(6 * i + 1) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (t * 1.0 / output_W_ * 2 - 1); dTheta_tmp_diff[(6 * i + 2) * (output_H_ * output_W_ * C) + idx] += delta_dpx; dTheta_tmp_diff[(6 * i + 3) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (s * 1.0 / output_H_ * 2 - 1); dTheta_tmp_diff[(6 * i + 4) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (t * 1.0 / output_W_ * 2 - 1); dTheta_tmp_diff[(6 * i + 5) * (output_H_ * output_W_ * C) + idx] += delta_dpy; } } template <typename Dtype> __global__ void SpatialTransformerBackwardGPU_dU(const int nthreads, const int C, const int W, const int H, const int output_H_, const int output_W_, const Dtype* input_grid_data, const Dtype* dV, Dtype* dU) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i; const int row_idx = output_W_ * s + t; const Dtype px = coordinates[row_idx * 2]; const Dtype py = coordinates[row_idx * 2 + 1]; const int V_offset = index; const Dtype x 
= (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; int m, n; Dtype w; Dtype* pic = dU + i * (C * H * W) + j * (H * W); m = floor(x); n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (y - n)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x) + 1; n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (y - n)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x); n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (n - y)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x) + 1; n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (n - y)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } } } template <typename Dtype> void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { string prefix = "SpatialTransformerLayer::Backward_GPU::\t"; const Dtype* dV = top[0]->gpu_diff(); const Dtype* input_grid_data = input_grid.gpu_data(); const Dtype* U = bottom[0]->gpu_data(); Dtype* dFull_theta = full_theta.mutable_gpu_diff(); Dtype* dTheta = bottom[1]->mutable_gpu_diff(); Dtype* dTheta_tmp_diff = dTheta_tmp.mutable_gpu_diff(); caffe_gpu_set(dTheta_tmp.count(), (Dtype)0., dTheta_tmp_diff); const int nthreads = N * C * output_H_ * output_W_; SpatialTransformerBackwardGPU_dTheta<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, C, output_H_, output_W_, H, W, input_grid_data, dV, U, dTheta_tmp_diff); Dtype* all_ones_2_data = all_ones_2.mutable_gpu_data(); caffe_gpu_set(all_ones_2.count(), (Dtype)1., all_ones_2_data); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, full_theta.count(), 1, output_H_ * output_W_ * C, (Dtype)1., dTheta_tmp_diff, all_ones_2_data, (Dtype)0., dFull_theta); /*const Dtype* db_dFull_theta = full_theta.cpu_diff(); for(int i=0; i<full_theta.count(); ++i) { std::cout << db_dFull_theta[i] << " "; } std::cout<<std::endl;*/ int k = 0; const int num_threads = N; for(int i=0; i<6; ++i) { if(!is_pre_defined_theta[i]) { copy_values<Dtype><<<CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS>>>(num_threads, 6, i, dFull_theta, 6 - pre_defined_count, k, dTheta); //std::cout << "Copying " << i << "/6 of dFull_theta to " << k << "/" << // 6 - pre_defined_count << " of dTheta" << std::endl; ++ k; } } /*const Dtype* db_dtheta = bottom[1]->cpu_diff(); for(int i=0; i<bottom[1]->count(); ++i) { std::cout << db_dtheta[i] << " "; } std::cout<<std::endl;*/ if(to_compute_dU_) { Dtype* dU = bottom[0]->mutable_gpu_diff(); caffe_gpu_set(bottom[0]->count(), (Dtype)0., dU); const int nthreads = N * C * output_H_ * output_W_; SpatialTransformerBackwardGPU_dU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, C, W, H, output_H_, output_W_, input_grid_data, dV, dU); } } INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer); } // namespace caffe
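The forward kernel in both copies of this layer performs bilinear sampling: a normalized grid coordinate (px, py) in [-1, 1] is mapped to (x, y) = ((px + 1)/2 * H, (py + 1)/2 * W), and V accumulates the four integer neighbors of (x, y) weighted by (1 - |x - m|)(1 - |y - n|). Below is a host-side sketch of that sampling rule for a single channel; the identifiers are illustrative and not taken from the layer.

// CPU sketch of the bilinear sampling used by SpatialTransformerForwardGPU
// (illustrative names; single channel, single image, row-major H x W storage).
#include <cmath>
#include <cstdio>
#include <vector>

double bilinearSample(const std::vector<double>& U, int H, int W, double px, double py) {
    const double x = (px + 1.0) / 2.0 * H;   // same coordinate mapping as the kernel
    const double y = (py + 1.0) / 2.0 * W;
    double v = 0.0;
    for (int dm = 0; dm <= 1; ++dm) {
        for (int dn = 0; dn <= 1; ++dn) {
            const int m = (int)std::floor(x) + dm;
            const int n = (int)std::floor(y) + dn;
            if (m >= 0 && m < H && n >= 0 && n < W) {
                const double w = (1.0 - std::fabs(x - m)) * (1.0 - std::fabs(y - n));
                v += w * U[m * W + n];
            }
        }
    }
    return v;
}

int main() {
    const int H = 2, W = 2;
    std::vector<double> U = {1.0, 2.0, 3.0, 4.0};
    // Normalized coordinate (0, 0) maps to (x, y) = (H/2, W/2).
    std::printf("sample at (0,0): %f\n", bilinearSample(U, H, W, 0.0, 0.0));
    return 0;
}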
a68d951371483d36d160e194de224c9b497f3ae7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--gridDim=[1200,1,1] --blockDim=[256,1,1] __global__ void AddKernel(const float *op1, const float *op2, int count, float *sum) { const int pos = threadIdx.x + blockIdx.x * blockDim.x; if (pos >= count) return; sum[pos] = op1[pos] + op2[pos]; }
a68d951371483d36d160e194de224c9b497f3ae7.cu
//pass //--gridDim=[1200,1,1] --blockDim=[256,1,1] __global__ void AddKernel(const float *op1, const float *op2, int count, float *sum) { const int pos = threadIdx.x + blockIdx.x * blockDim.x; if (pos >= count) return; sum[pos] = op1[pos] + op2[pos]; }
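The //pass header with --gridDim/--blockDim appears to record the launch configuration this kernel is checked against: 1200 blocks of 256 threads, i.e. up to 1200 * 256 = 307200 elements. A minimal host-side usage sketch under that configuration follows; everything in it except AddKernel itself is illustrative.

// Host-side usage sketch for AddKernel with the annotated launch configuration
// (gridDim = 1200, blockDim = 256). Buffer names and sizes are illustrative.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void AddKernel(const float* op1, const float* op2, int count, float* sum) {
    const int pos = threadIdx.x + blockIdx.x * blockDim.x;
    if (pos >= count) return;
    sum[pos] = op1[pos] + op2[pos];
}

int main() {
    const int count = 1200 * 256;
    float *d_a, *d_b, *d_sum;
    cudaMalloc(&d_a, count * sizeof(float));
    cudaMalloc(&d_b, count * sizeof(float));
    cudaMalloc(&d_sum, count * sizeof(float));
    cudaMemset(d_a, 0, count * sizeof(float));
    cudaMemset(d_b, 0, count * sizeof(float));

    AddKernel<<<1200, 256>>>(d_a, d_b, count, d_sum);
    cudaDeviceSynchronize();
    std::printf("AddKernel done: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaFree(d_a); cudaFree(d_b); cudaFree(d_sum);
    return 0;
}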
5351ba37023b2acaadb7cabb03032918e3841ed6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/kernel/util/cuda_arithemetic_interface.h" #include "oneflow/core/common/switch_func.h" #include "oneflow/core/kernel/util/host_arithemetic_interface.h" #include "oneflow/core/kernel/new_kernel_util.h" #include "oneflow/core/register/blob.h" #include "oneflow/core/kernel/util/cuda_half_util.h" #include "oneflow/core/cuda/elementwise.cuh" namespace oneflow { namespace { template<int32_t NDIMS> struct Int32Array { int32_t val[NDIMS]; }; template<int32_t NDIMS> __device__ int32_t GetXIndex(const int32_t* y_shape, const int32_t* x_strides, int32_t y_idx) { int32_t x_idx = 0; for (int32_t i = NDIMS - 1; i >= 0; --i) { const int32_t next_y_idx = y_idx / y_shape[i]; x_idx += (y_idx - next_y_idx * y_shape[i]) * x_strides[i]; y_idx = next_y_idx; } return x_idx; } template<int32_t NDIMS, typename T> __global__ void TransposeGpu(const Int32Array<NDIMS> y_shape, const Int32Array<NDIMS> x_strides, const int32_t elem_cnt, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(y_idx, elem_cnt) { const int32_t x_idx = GetXIndex<NDIMS>(y_shape.val, x_strides.val, y_idx); #if __CUDA_ARCH__ >= 350 y[y_idx] = __ldg(x + x_idx); #else y[y_idx] = x[x_idx]; #endif } } template<int32_t NDIMS, typename T> void LaunchTransposeGpu(DeviceCtx* ctx, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const T* x, T* y) { CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>()); Int32Array<NDIMS> y_shape_struct; FOR_RANGE(int32_t, i, 0, NDIMS) { y_shape_struct.val[i] = y_shape.At(i); } Int32Array<NDIMS> x_strides; int32_t buff[NDIMS]; int32_t cur_stride = 1; for (int32_t i = NDIMS - 1; i >= 0; --i) { buff[i] = cur_stride; cur_stride *= x_shape.At(i); } for (int32_t i = 0; i < NDIMS; ++i) { x_strides.val[i] = buff[permutation[i]]; } if (elem_cnt == 0) { return; } hipLaunchKernelGGL(( TransposeGpu<NDIMS, T>) , dim3(SMBlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), y_shape_struct, x_strides, elem_cnt, x, y); } template<int32_t NDIMS, typename T> void TransposeImpl(DeviceCtx* ctx, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const T* x, T* y) { CHECK_EQ(x_shape.NumAxes(), NDIMS); CHECK_EQ(y_shape.NumAxes(), NDIMS); using PackType = int64_t; const size_t pack_size = sizeof(PackType) / sizeof(T); int64_t in_last_dim = x_shape.At(x_shape.NumAxes() - 1); int64_t out_last_dim = y_shape.At(y_shape.NumAxes() - 1); if (pack_size != 1 && permutation.back() == permutation.size() - 1 && in_last_dim % pack_size == 0) { CHECK_EQ(in_last_dim, out_last_dim); DimVector packed_in_dim_vec; x_shape.ToDimVector(&packed_in_dim_vec); packed_in_dim_vec.back() /= pack_size; Shape packed_in_shape(packed_in_dim_vec); DimVector packed_out_dim_vec; y_shape.ToDimVector(&packed_out_dim_vec); packed_out_dim_vec.back() /= 
pack_size; Shape packed_out_shape(packed_out_dim_vec); LaunchTransposeGpu<NDIMS, PackType>( ctx, ShapeView(packed_in_shape), ShapeView(packed_out_shape), permutation, packed_in_shape.elem_cnt(), reinterpret_cast<const PackType*>(x), reinterpret_cast<PackType*>(y)); } else { LaunchTransposeGpu<NDIMS, T>(ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } } template<typename T> struct TransposeUtil final { #define MAKE_TRANSPOSE_SWITCH_ENTRY(func_name, NDIMS) func_name<NDIMS, T> DEFINE_STATIC_SWITCH_FUNC(void, TransposeImpl, MAKE_TRANSPOSE_SWITCH_ENTRY, MAKE_NDIM_CTRV_SEQ(DIM_SEQ)) }; } // namespace #define TRANSPOSE_CHECK \ CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>()); \ CHECK_EQ(num_axis, y_shape.NumAxes()); \ CHECK_EQ(num_axis, x_shape.NumAxes()) void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const float* x, float* y) { TRANSPOSE_CHECK; TransposeUtil<float>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const double* x, double* y) { TRANSPOSE_CHECK; TransposeUtil<double>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const float16* x, float16* y) { TRANSPOSE_CHECK; TransposeUtil<half>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const int8_t* x, int8_t* y) { TRANSPOSE_CHECK; TransposeUtil<int8_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const uint8_t* x, uint8_t* y) { TRANSPOSE_CHECK; TransposeUtil<uint8_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const int32_t* x, int32_t* y) { TRANSPOSE_CHECK; TransposeUtil<int32_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const int64_t* x, int64_t* y) { TRANSPOSE_CHECK; TransposeUtil<int64_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } #undef TRANSPOSE_CHECK void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const 
ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const float* x, float* y) { ArithemeticIf<DeviceType::kGPU>::Transpose( ctx, num_axis, x_shape, y_shape, std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const double* x, double* y) { ArithemeticIf<DeviceType::kGPU>::Transpose( ctx, num_axis, x_shape, y_shape, std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const float16* x, float16* y) { ArithemeticIf<DeviceType::kGPU>::Transpose( ctx, num_axis, x_shape, y_shape, std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const int8_t* x, int8_t* y) { ArithemeticIf<DeviceType::kGPU>::Transpose( ctx, num_axis, x_shape, y_shape, std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const uint8_t* x, uint8_t* y) { ArithemeticIf<DeviceType::kGPU>::Transpose( ctx, num_axis, x_shape, y_shape, std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const int32_t* x, int32_t* y) { ArithemeticIf<DeviceType::kGPU>::Transpose( ctx, num_axis, x_shape, y_shape, std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const int64_t* x, int64_t* y) { ArithemeticIf<DeviceType::kGPU>::Transpose( ctx, num_axis, x_shape, y_shape, std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::InitializeWithConstConf( DeviceCtx* ctx, const ConstantInitializerConf& initializer_conf, Blob* blob) { WithHostBlobAndStreamSynchronizeEnv(ctx, blob, [&](Blob* host_blob) { ArithemeticIf<DeviceType::kCPU>::InitializeWithConstConf(nullptr, initializer_conf, host_blob); }); } namespace { template<typename T> struct AddOp { __device__ T operator()(T a, T b) const { return a + b; } }; template<typename T> struct SubOp { __device__ T operator()(T a, T b) const { return a - b; } }; template<typename T> struct MulOp { __device__ T operator()(T a, T b) const { return a * b; } }; template<typename T> struct DivOp { __device__ T operator()(T a, T b) const { return a / b; } }; template<template<typename> typename Op, typename T> struct UnaryByScalarFunctor { __host__ __device__ explicit UnaryByScalarFunctor(T scalar) : scalar(scalar) {} __device__ T operator()(T a) const 
{ return Op<T>()(a, scalar); } const T scalar; }; template<template<typename> typename Op, typename T> struct UnaryByScalarPtrFunctorFactory { __host__ __device__ explicit UnaryByScalarPtrFunctorFactory(const T* scalar_ptr) : scalar_ptr(scalar_ptr) {} __device__ UnaryByScalarFunctor<Op, T> operator()() const { return UnaryByScalarFunctor<Op, T>(*scalar_ptr); } const T* scalar_ptr; }; template<template<typename> typename Op, typename T> void LaunchUnaryByScalar(DeviceCtx* ctx, const int64_t n, const T* x, const T y, T* z) { OF_CUDA_CHECK( (cuda::elementwise::Unary(UnaryByScalarFunctor<Op, T>(y), n, z, x, ctx->cuda_stream()))); } template<template<typename> typename Op, typename T> void LaunchUnaryByScalarPtr(DeviceCtx* ctx, const int64_t n, const T* x, const T* y, T* z) { OF_CUDA_CHECK((cuda::elementwise::UnaryWithFactory(UnaryByScalarPtrFunctorFactory<Op, T>(y), n, z, x, ctx->cuda_stream()))); } template<typename T> __global__ void FillGpu(const int64_t n, const T value, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = value; } } template<typename T> __global__ void CopyColsRegionGpu(const int64_t row_num, const int64_t col_num, const T* x, const int64_t x_col_offset, const int64_t x_lda, T* y, const int64_t y_col_offset, const int64_t y_lda) { CUDA_1D_KERNEL_LOOP(index, row_num * col_num) { const int64_t i = index / col_num; const int64_t j = index % col_num; y[i * y_lda + y_col_offset + j] = x[i * x_lda + x_col_offset + j]; } } } // namespace #define OP_BY_SCALAR(op, T) \ void ArithemeticIf<DeviceType::kGPU>::op##ByScalar(DeviceCtx* ctx, const int64_t n, const T* x, \ const T y, T* z) { \ LaunchUnaryByScalar<op##Op, T>(ctx, n, x, y, z); \ } #define OP_BY_SCALAR_HALF(op) \ void ArithemeticIf<DeviceType::kGPU>::op##ByScalar( \ DeviceCtx* ctx, const int64_t n, const float16* x, const float16 y, float16* z) { \ LaunchUnaryByScalar<op##Op, half>(ctx, n, reinterpret_cast<const half*>(x), float16_2half(y), \ reinterpret_cast<half*>(z)); \ } #define DEFINE_OP_BY_SCALAR(op) \ OP_BY_SCALAR(op, float) \ OP_BY_SCALAR(op, double) \ OP_BY_SCALAR(op, int8_t) \ OP_BY_SCALAR(op, int32_t) \ OP_BY_SCALAR(op, int64_t) \ OP_BY_SCALAR_HALF(op) DEFINE_OP_BY_SCALAR(Mul) DEFINE_OP_BY_SCALAR(Add) #define OP_BY_SCALAR_PTR(op, T) \ void ArithemeticIf<DeviceType::kGPU>::op##ByScalarPtr(DeviceCtx* ctx, const int64_t n, \ const T* x, const T* y, T* z) { \ LaunchUnaryByScalarPtr<op##Op, T>(ctx, n, x, y, z); \ } #define OP_BY_SCALAR_PTR_HALF(op) \ void ArithemeticIf<DeviceType::kGPU>::op##ByScalarPtr( \ DeviceCtx* ctx, const int64_t n, const float16* x, const float16* y, float16* z) { \ LaunchUnaryByScalarPtr<op##Op, half>(ctx, n, reinterpret_cast<const half*>(x), \ reinterpret_cast<const half*>(y), \ reinterpret_cast<half*>(z)); \ } #define DEFINE_OP_BY_SCALAR_PTR(op) \ OP_BY_SCALAR_PTR(op, float) \ OP_BY_SCALAR_PTR(op, double) \ OP_BY_SCALAR_PTR(op, int8_t) \ OP_BY_SCALAR_PTR(op, int32_t) \ OP_BY_SCALAR_PTR(op, int64_t) \ OP_BY_SCALAR_PTR_HALF(op) DEFINE_OP_BY_SCALAR_PTR(Mul) DEFINE_OP_BY_SCALAR_PTR(Add) DEFINE_OP_BY_SCALAR_PTR(Sub) DEFINE_OP_BY_SCALAR_PTR(Div) #define FILL(T) \ void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const T value, \ T* y) { \ hipLaunchKernelGGL(( FillGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), \ n, value, y); \ } FILL(float) FILL(double) FILL(uint8_t); FILL(int8_t) FILL(int32_t) FILL(int64_t) #undef FILL void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const float16 value, float16* y) { 
hipLaunchKernelGGL(( FillGpu<half>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, float16_2half(value), reinterpret_cast<half*>(y)); } #define COPY_COLS_REGION(T) \ void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion( \ DeviceCtx* ctx, const int64_t row_num, const int64_t col_num, const T* x, \ const int64_t x_col_offset, const int64_t x_lda, T* y, const int64_t y_col_offset, \ const int64_t y_lda) { \ hipLaunchKernelGGL(( CopyColsRegionGpu<T>), dim3(BlocksNum4ThreadsNum(row_num* col_num)), dim3(kCudaThreadsNumPerBlock), 0, \ ctx->cuda_stream(), row_num, col_num, x, x_col_offset, x_lda, y, \ y_col_offset, y_lda); \ } COPY_COLS_REGION(float) COPY_COLS_REGION(double) COPY_COLS_REGION(uint8_t) COPY_COLS_REGION(int8_t) COPY_COLS_REGION(int32_t) COPY_COLS_REGION(int64_t) #undef COPY_COLS_REGION void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion(DeviceCtx* ctx, const int64_t row_num, const int64_t col_num, const float16* x, const int64_t x_col_offset, const int64_t x_lda, float16* y, const int64_t y_col_offset, const int64_t y_lda) { hipLaunchKernelGGL(( CopyColsRegionGpu<half>) , dim3(BlocksNum4ThreadsNum(row_num * col_num)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), row_num, col_num, reinterpret_cast<const half*>(x), x_col_offset, x_lda, reinterpret_cast<half*>(y), y_col_offset, y_lda); } } // namespace oneflow
5351ba37023b2acaadb7cabb03032918e3841ed6.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/kernel/util/cuda_arithemetic_interface.h" #include "oneflow/core/common/switch_func.h" #include "oneflow/core/kernel/util/host_arithemetic_interface.h" #include "oneflow/core/kernel/new_kernel_util.h" #include "oneflow/core/register/blob.h" #include "oneflow/core/kernel/util/cuda_half_util.h" #include "oneflow/core/cuda/elementwise.cuh" namespace oneflow { namespace { template<int32_t NDIMS> struct Int32Array { int32_t val[NDIMS]; }; template<int32_t NDIMS> __device__ int32_t GetXIndex(const int32_t* y_shape, const int32_t* x_strides, int32_t y_idx) { int32_t x_idx = 0; for (int32_t i = NDIMS - 1; i >= 0; --i) { const int32_t next_y_idx = y_idx / y_shape[i]; x_idx += (y_idx - next_y_idx * y_shape[i]) * x_strides[i]; y_idx = next_y_idx; } return x_idx; } template<int32_t NDIMS, typename T> __global__ void TransposeGpu(const Int32Array<NDIMS> y_shape, const Int32Array<NDIMS> x_strides, const int32_t elem_cnt, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(y_idx, elem_cnt) { const int32_t x_idx = GetXIndex<NDIMS>(y_shape.val, x_strides.val, y_idx); #if __CUDA_ARCH__ >= 350 y[y_idx] = __ldg(x + x_idx); #else y[y_idx] = x[x_idx]; #endif } } template<int32_t NDIMS, typename T> void LaunchTransposeGpu(DeviceCtx* ctx, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const T* x, T* y) { CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>()); Int32Array<NDIMS> y_shape_struct; FOR_RANGE(int32_t, i, 0, NDIMS) { y_shape_struct.val[i] = y_shape.At(i); } Int32Array<NDIMS> x_strides; int32_t buff[NDIMS]; int32_t cur_stride = 1; for (int32_t i = NDIMS - 1; i >= 0; --i) { buff[i] = cur_stride; cur_stride *= x_shape.At(i); } for (int32_t i = 0; i < NDIMS; ++i) { x_strides.val[i] = buff[permutation[i]]; } if (elem_cnt == 0) { return; } TransposeGpu<NDIMS, T> <<<SMBlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( y_shape_struct, x_strides, elem_cnt, x, y); } template<int32_t NDIMS, typename T> void TransposeImpl(DeviceCtx* ctx, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const T* x, T* y) { CHECK_EQ(x_shape.NumAxes(), NDIMS); CHECK_EQ(y_shape.NumAxes(), NDIMS); using PackType = int64_t; const size_t pack_size = sizeof(PackType) / sizeof(T); int64_t in_last_dim = x_shape.At(x_shape.NumAxes() - 1); int64_t out_last_dim = y_shape.At(y_shape.NumAxes() - 1); if (pack_size != 1 && permutation.back() == permutation.size() - 1 && in_last_dim % pack_size == 0) { CHECK_EQ(in_last_dim, out_last_dim); DimVector packed_in_dim_vec; x_shape.ToDimVector(&packed_in_dim_vec); packed_in_dim_vec.back() /= pack_size; Shape packed_in_shape(packed_in_dim_vec); DimVector packed_out_dim_vec; y_shape.ToDimVector(&packed_out_dim_vec); packed_out_dim_vec.back() /= pack_size; Shape packed_out_shape(packed_out_dim_vec); LaunchTransposeGpu<NDIMS, PackType>( ctx, 
ShapeView(packed_in_shape), ShapeView(packed_out_shape), permutation, packed_in_shape.elem_cnt(), reinterpret_cast<const PackType*>(x), reinterpret_cast<PackType*>(y)); } else { LaunchTransposeGpu<NDIMS, T>(ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } } template<typename T> struct TransposeUtil final { #define MAKE_TRANSPOSE_SWITCH_ENTRY(func_name, NDIMS) func_name<NDIMS, T> DEFINE_STATIC_SWITCH_FUNC(void, TransposeImpl, MAKE_TRANSPOSE_SWITCH_ENTRY, MAKE_NDIM_CTRV_SEQ(DIM_SEQ)) }; } // namespace #define TRANSPOSE_CHECK \ CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>()); \ CHECK_EQ(num_axis, y_shape.NumAxes()); \ CHECK_EQ(num_axis, x_shape.NumAxes()) void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const float* x, float* y) { TRANSPOSE_CHECK; TransposeUtil<float>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const double* x, double* y) { TRANSPOSE_CHECK; TransposeUtil<double>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const float16* x, float16* y) { TRANSPOSE_CHECK; TransposeUtil<half>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const int8_t* x, int8_t* y) { TRANSPOSE_CHECK; TransposeUtil<int8_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const uint8_t* x, uint8_t* y) { TRANSPOSE_CHECK; TransposeUtil<uint8_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const int32_t* x, int32_t* y) { TRANSPOSE_CHECK; TransposeUtil<int32_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int32_t>& permutation, const int64_t elem_cnt, const int64_t* x, int64_t* y) { TRANSPOSE_CHECK; TransposeUtil<int64_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } #undef TRANSPOSE_CHECK void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t 
elem_cnt, const float* x, float* y) { ArithemeticIf<DeviceType::kGPU>::Transpose( ctx, num_axis, x_shape, y_shape, std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const double* x, double* y) { ArithemeticIf<DeviceType::kGPU>::Transpose( ctx, num_axis, x_shape, y_shape, std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const float16* x, float16* y) { ArithemeticIf<DeviceType::kGPU>::Transpose( ctx, num_axis, x_shape, y_shape, std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const int8_t* x, int8_t* y) { ArithemeticIf<DeviceType::kGPU>::Transpose( ctx, num_axis, x_shape, y_shape, std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const uint8_t* x, uint8_t* y) { ArithemeticIf<DeviceType::kGPU>::Transpose( ctx, num_axis, x_shape, y_shape, std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const int32_t* x, int32_t* y) { ArithemeticIf<DeviceType::kGPU>::Transpose( ctx, num_axis, x_shape, y_shape, std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const int64_t* x, int64_t* y) { ArithemeticIf<DeviceType::kGPU>::Transpose( ctx, num_axis, x_shape, y_shape, std::vector<int32_t>({permutation.cbegin(), permutation.cend()}), elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::InitializeWithConstConf( DeviceCtx* ctx, const ConstantInitializerConf& initializer_conf, Blob* blob) { WithHostBlobAndStreamSynchronizeEnv(ctx, blob, [&](Blob* host_blob) { ArithemeticIf<DeviceType::kCPU>::InitializeWithConstConf(nullptr, initializer_conf, host_blob); }); } namespace { template<typename T> struct AddOp { __device__ T operator()(T a, T b) const { return a + b; } }; template<typename T> struct SubOp { __device__ T operator()(T a, T b) const { return a - b; } }; template<typename T> struct MulOp { __device__ T operator()(T a, T b) const { return a * b; } }; template<typename T> struct DivOp { __device__ T operator()(T a, T b) const { return a / b; } }; template<template<typename> typename Op, typename T> struct UnaryByScalarFunctor { __host__ __device__ explicit UnaryByScalarFunctor(T scalar) : scalar(scalar) {} __device__ T operator()(T a) const { return Op<T>()(a, scalar); } const T scalar; }; template<template<typename> typename Op, 
typename T> struct UnaryByScalarPtrFunctorFactory { __host__ __device__ explicit UnaryByScalarPtrFunctorFactory(const T* scalar_ptr) : scalar_ptr(scalar_ptr) {} __device__ UnaryByScalarFunctor<Op, T> operator()() const { return UnaryByScalarFunctor<Op, T>(*scalar_ptr); } const T* scalar_ptr; }; template<template<typename> typename Op, typename T> void LaunchUnaryByScalar(DeviceCtx* ctx, const int64_t n, const T* x, const T y, T* z) { OF_CUDA_CHECK( (cuda::elementwise::Unary(UnaryByScalarFunctor<Op, T>(y), n, z, x, ctx->cuda_stream()))); } template<template<typename> typename Op, typename T> void LaunchUnaryByScalarPtr(DeviceCtx* ctx, const int64_t n, const T* x, const T* y, T* z) { OF_CUDA_CHECK((cuda::elementwise::UnaryWithFactory(UnaryByScalarPtrFunctorFactory<Op, T>(y), n, z, x, ctx->cuda_stream()))); } template<typename T> __global__ void FillGpu(const int64_t n, const T value, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = value; } } template<typename T> __global__ void CopyColsRegionGpu(const int64_t row_num, const int64_t col_num, const T* x, const int64_t x_col_offset, const int64_t x_lda, T* y, const int64_t y_col_offset, const int64_t y_lda) { CUDA_1D_KERNEL_LOOP(index, row_num * col_num) { const int64_t i = index / col_num; const int64_t j = index % col_num; y[i * y_lda + y_col_offset + j] = x[i * x_lda + x_col_offset + j]; } } } // namespace #define OP_BY_SCALAR(op, T) \ void ArithemeticIf<DeviceType::kGPU>::op##ByScalar(DeviceCtx* ctx, const int64_t n, const T* x, \ const T y, T* z) { \ LaunchUnaryByScalar<op##Op, T>(ctx, n, x, y, z); \ } #define OP_BY_SCALAR_HALF(op) \ void ArithemeticIf<DeviceType::kGPU>::op##ByScalar( \ DeviceCtx* ctx, const int64_t n, const float16* x, const float16 y, float16* z) { \ LaunchUnaryByScalar<op##Op, half>(ctx, n, reinterpret_cast<const half*>(x), float16_2half(y), \ reinterpret_cast<half*>(z)); \ } #define DEFINE_OP_BY_SCALAR(op) \ OP_BY_SCALAR(op, float) \ OP_BY_SCALAR(op, double) \ OP_BY_SCALAR(op, int8_t) \ OP_BY_SCALAR(op, int32_t) \ OP_BY_SCALAR(op, int64_t) \ OP_BY_SCALAR_HALF(op) DEFINE_OP_BY_SCALAR(Mul) DEFINE_OP_BY_SCALAR(Add) #define OP_BY_SCALAR_PTR(op, T) \ void ArithemeticIf<DeviceType::kGPU>::op##ByScalarPtr(DeviceCtx* ctx, const int64_t n, \ const T* x, const T* y, T* z) { \ LaunchUnaryByScalarPtr<op##Op, T>(ctx, n, x, y, z); \ } #define OP_BY_SCALAR_PTR_HALF(op) \ void ArithemeticIf<DeviceType::kGPU>::op##ByScalarPtr( \ DeviceCtx* ctx, const int64_t n, const float16* x, const float16* y, float16* z) { \ LaunchUnaryByScalarPtr<op##Op, half>(ctx, n, reinterpret_cast<const half*>(x), \ reinterpret_cast<const half*>(y), \ reinterpret_cast<half*>(z)); \ } #define DEFINE_OP_BY_SCALAR_PTR(op) \ OP_BY_SCALAR_PTR(op, float) \ OP_BY_SCALAR_PTR(op, double) \ OP_BY_SCALAR_PTR(op, int8_t) \ OP_BY_SCALAR_PTR(op, int32_t) \ OP_BY_SCALAR_PTR(op, int64_t) \ OP_BY_SCALAR_PTR_HALF(op) DEFINE_OP_BY_SCALAR_PTR(Mul) DEFINE_OP_BY_SCALAR_PTR(Add) DEFINE_OP_BY_SCALAR_PTR(Sub) DEFINE_OP_BY_SCALAR_PTR(Div) #define FILL(T) \ void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const T value, \ T* y) { \ FillGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( \ n, value, y); \ } FILL(float) FILL(double) FILL(uint8_t); FILL(int8_t) FILL(int32_t) FILL(int64_t) #undef FILL void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const float16 value, float16* y) { FillGpu<half><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, float16_2half(value), 
reinterpret_cast<half*>(y)); } #define COPY_COLS_REGION(T) \ void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion( \ DeviceCtx* ctx, const int64_t row_num, const int64_t col_num, const T* x, \ const int64_t x_col_offset, const int64_t x_lda, T* y, const int64_t y_col_offset, \ const int64_t y_lda) { \ CopyColsRegionGpu<T><<<BlocksNum4ThreadsNum(row_num* col_num), kCudaThreadsNumPerBlock, 0, \ ctx->cuda_stream()>>>(row_num, col_num, x, x_col_offset, x_lda, y, \ y_col_offset, y_lda); \ } COPY_COLS_REGION(float) COPY_COLS_REGION(double) COPY_COLS_REGION(uint8_t) COPY_COLS_REGION(int8_t) COPY_COLS_REGION(int32_t) COPY_COLS_REGION(int64_t) #undef COPY_COLS_REGION void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion(DeviceCtx* ctx, const int64_t row_num, const int64_t col_num, const float16* x, const int64_t x_col_offset, const int64_t x_lda, float16* y, const int64_t y_col_offset, const int64_t y_lda) { CopyColsRegionGpu<half> <<<BlocksNum4ThreadsNum(row_num * col_num), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( row_num, col_num, reinterpret_cast<const half*>(x), x_col_offset, x_lda, reinterpret_cast<half*>(y), y_col_offset, y_lda); } } // namespace oneflow
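In both copies of this file, GetXIndex converts a linear output index into an input offset by peeling off one output coordinate per dimension (last dimension first) and multiplying it by the corresponding permuted input stride, which LaunchTransposeGpu precomputes as x_strides[i] = stride_of_x[permutation[i]]. A small host-side check of that mapping for a 3-D case is sketched below; the shapes and permutation are illustrative.

// Host-side sketch of the index mapping used by TransposeGpu / GetXIndex.
#include <cstdio>
#include <vector>

// Same recurrence as GetXIndex: peel output coordinates from the last dimension
// and accumulate them against the permuted input strides.
static int getXIndex(const std::vector<int>& y_shape, const std::vector<int>& x_strides, int y_idx) {
    int x_idx = 0;
    for (int i = (int)y_shape.size() - 1; i >= 0; --i) {
        const int next = y_idx / y_shape[i];
        x_idx += (y_idx - next * y_shape[i]) * x_strides[i];
        y_idx = next;
    }
    return x_idx;
}

int main() {
    // Illustrative 3-D case: x is 2x3x4, permutation {2, 0, 1} -> y is 4x2x3.
    const std::vector<int> x_shape = {2, 3, 4};
    const std::vector<int> perm = {2, 0, 1};
    std::vector<int> y_shape(3), x_stride(3), x_strides(3);
    int cur = 1;
    for (int i = 2; i >= 0; --i) { x_stride[i] = cur; cur *= x_shape[i]; }
    for (int i = 0; i < 3; ++i) { y_shape[i] = x_shape[perm[i]]; x_strides[i] = x_stride[perm[i]]; }

    // y[a][b][c] should read x[b][c][a]: check one element, e.g. y coords (3, 1, 2).
    const int y_idx = (3 * y_shape[1] + 1) * y_shape[2] + 2;
    const int x_idx = getXIndex(y_shape, x_strides, y_idx);
    const int expected = (1 * x_shape[1] + 2) * x_shape[2] + 3;   // x coords (1, 2, 3)
    std::printf("x_idx = %d, expected = %d\n", x_idx, expected);
    return 0;
}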
747045ed2d942d453df1ee3d0030051fd30e7c48.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "matrix/math.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace Matrix { template <typename Type> __global__ void nativePowerKernel(Type* in, Type* out, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < len) { out[idx] = in[idx] * in[idx]; } } template <typename Type> void naivePower(Type* in, Type* out, int len) { static const int TPB = 64; int nblks = ceildiv(len, TPB); hipLaunchKernelGGL(( nativePowerKernel<Type>), dim3(nblks),dim3(TPB), 0, 0, in, out, len); CUDA_CHECK(hipPeekAtLastError()); } template <typename Type> __global__ void nativeSqrtKernel(Type* in, Type* out, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < len) { out[idx] = sqrt(in[idx]); } } template <typename Type> void naiveSqrt(Type* in, Type* out, int len) { static const int TPB = 64; int nblks = ceildiv(len, TPB); hipLaunchKernelGGL(( nativeSqrtKernel<Type>), dim3(nblks),dim3(TPB), 0, 0, in, out, len); CUDA_CHECK(hipPeekAtLastError()); } template <typename Type> __global__ void nativeSignFlipKernel(Type *in, Type *out, int rowCount, int colCount) { int d_i = blockIdx.x * rowCount; int end = d_i + rowCount; if (blockIdx.x < colCount) { Type max = 0.0; int max_index = 0; for (int i = d_i; i < end; i++) { Type val = in[i]; if (val < 0.0) { val = -val; } if (val > max) { max = val; max_index = i; } } for (int i = d_i; i < end; i++) { if (in[max_index] < 0.0) { out[i] = -in[i]; } else { out[i] = in[i]; } } } __syncthreads(); } template <typename Type> void naiveSignFlip(Type *in, Type *out, int rowCount, int colCount) { hipLaunchKernelGGL(( nativeSignFlipKernel<Type>), dim3(colCount),dim3(1), 0, 0, in, out, rowCount, colCount); CUDA_CHECK(hipPeekAtLastError()); } template <typename T> struct MathInputs { T tolerance; int n_row; int n_col; int len; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const MathInputs<T>& dims) { return os; } template <typename T> class MathTest: public ::testing::TestWithParam<MathInputs<T> > { protected: void SetUp() override { params = ::testing::TestWithParam<MathInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); int len = params.len; allocate(in_power, len); allocate(out_power_ref, len); allocate(in_sqrt, len); allocate(out_sqrt_ref, len); allocate(in_sign_flip, len); allocate(out_sign_flip_ref, len); allocate(in_ratio, 4); T in_ratio_h[4] = { 1.0, 2.0, 2.0, 3.0 }; updateDevice(in_ratio, in_ratio_h, 4); allocate(out_ratio_ref, 4); T out_ratio_ref_h[4] = { 0.125, 0.25, 0.25, 0.375 }; updateDevice(out_ratio_ref, out_ratio_ref_h, 4); r.uniform(in_power, len, T(-1.0), T(1.0)); r.uniform(in_sqrt, len, T(0.0), T(1.0)); //r.uniform(in_ratio, len, T(0.0), T(1.0)); r.uniform(in_sign_flip, len, T(0.0), T(1.0)); naivePower(in_power, out_power_ref, len); power(in_power, len); 
naiveSqrt(in_sqrt, out_sqrt_ref, len); seqRoot(in_sqrt, len); ratio(in_ratio, in_ratio, 4); naiveSignFlip(in_sign_flip, out_sign_flip_ref, params.n_row, params.n_col); //signFlip(in_sign_flip, params.n_row, params.n_col); } void TearDown() override { CUDA_CHECK(hipFree(in_power)); CUDA_CHECK(hipFree(out_power_ref)); CUDA_CHECK(hipFree(in_sqrt)); CUDA_CHECK(hipFree(out_sqrt_ref)); CUDA_CHECK(hipFree(in_ratio)); CUDA_CHECK(hipFree(out_ratio_ref)); CUDA_CHECK(hipFree(in_sign_flip)); CUDA_CHECK(hipFree(out_sign_flip_ref)); } protected: MathInputs<T> params; T *in_power, *out_power_ref, *in_sqrt, *out_sqrt_ref, *in_ratio, *out_ratio_ref, *in_sign_flip, *out_sign_flip_ref; }; const std::vector<MathInputs<float> > inputsf = { {0.00001f, 1024, 1024, 1024*1024, 1234ULL} }; const std::vector<MathInputs<double> > inputsd = { {0.00001, 1024, 1024, 1024*1024, 1234ULL} }; typedef MathTest<float> MathPowerTestF; TEST_P(MathPowerTestF, Result) { ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathPowerTestD; TEST_P(MathPowerTestD, Result){ ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len, CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathSqrtTestF; TEST_P(MathSqrtTestF, Result) { ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathSqrtTestD; TEST_P(MathSqrtTestD, Result){ ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len, CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathRatioTestF; TEST_P(MathRatioTestF, Result) { ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathRatioTestD; TEST_P(MathRatioTestD, Result){ ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4, CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathSignFlipTestF; TEST_P(MathSignFlipTestF, Result) { ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathSignFlipTestD; TEST_P(MathSignFlipTestD, Result){ ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestD, ::testing::ValuesIn(inputsd)); } // end namespace LinAlg } // end namespace MLCommon
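// Hedged reference sketch (not part of the original test file): the expected ratio
// outputs hard-coded above ({0.125, 0.25, 0.25, 0.375} for inputs {1, 2, 2, 3}) are
// consistent with "each element divided by the sum of all elements" (sum = 8). The
// hypothetical host helper below only illustrates that interpretation; the actual
// ratio prim under test may differ in implementation details.
#include <numeric>
#include <vector>
template <typename T>
std::vector<T> naiveRatioHost(const std::vector<T>& in) {
  T sum = std::accumulate(in.begin(), in.end(), T(0));   // total of all elements
  std::vector<T> out(in.size());
  for (size_t i = 0; i < in.size(); ++i) out[i] = in[i] / sum;
  return out;
}
// Example: naiveRatioHost<float>({1, 2, 2, 3}) yields {0.125, 0.25, 0.25, 0.375}.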
747045ed2d942d453df1ee3d0030051fd30e7c48.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "matrix/math.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace Matrix { template <typename Type> __global__ void nativePowerKernel(Type* in, Type* out, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < len) { out[idx] = in[idx] * in[idx]; } } template <typename Type> void naivePower(Type* in, Type* out, int len) { static const int TPB = 64; int nblks = ceildiv(len, TPB); nativePowerKernel<Type><<<nblks,TPB>>>(in, out, len); CUDA_CHECK(cudaPeekAtLastError()); } template <typename Type> __global__ void nativeSqrtKernel(Type* in, Type* out, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < len) { out[idx] = sqrt(in[idx]); } } template <typename Type> void naiveSqrt(Type* in, Type* out, int len) { static const int TPB = 64; int nblks = ceildiv(len, TPB); nativeSqrtKernel<Type><<<nblks,TPB>>>(in, out, len); CUDA_CHECK(cudaPeekAtLastError()); } template <typename Type> __global__ void nativeSignFlipKernel(Type *in, Type *out, int rowCount, int colCount) { int d_i = blockIdx.x * rowCount; int end = d_i + rowCount; if (blockIdx.x < colCount) { Type max = 0.0; int max_index = 0; for (int i = d_i; i < end; i++) { Type val = in[i]; if (val < 0.0) { val = -val; } if (val > max) { max = val; max_index = i; } } for (int i = d_i; i < end; i++) { if (in[max_index] < 0.0) { out[i] = -in[i]; } else { out[i] = in[i]; } } } __syncthreads(); } template <typename Type> void naiveSignFlip(Type *in, Type *out, int rowCount, int colCount) { nativeSignFlipKernel<Type><<<colCount,1>>>(in, out, rowCount, colCount); CUDA_CHECK(cudaPeekAtLastError()); } template <typename T> struct MathInputs { T tolerance; int n_row; int n_col; int len; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const MathInputs<T>& dims) { return os; } template <typename T> class MathTest: public ::testing::TestWithParam<MathInputs<T> > { protected: void SetUp() override { params = ::testing::TestWithParam<MathInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); int len = params.len; allocate(in_power, len); allocate(out_power_ref, len); allocate(in_sqrt, len); allocate(out_sqrt_ref, len); allocate(in_sign_flip, len); allocate(out_sign_flip_ref, len); allocate(in_ratio, 4); T in_ratio_h[4] = { 1.0, 2.0, 2.0, 3.0 }; updateDevice(in_ratio, in_ratio_h, 4); allocate(out_ratio_ref, 4); T out_ratio_ref_h[4] = { 0.125, 0.25, 0.25, 0.375 }; updateDevice(out_ratio_ref, out_ratio_ref_h, 4); r.uniform(in_power, len, T(-1.0), T(1.0)); r.uniform(in_sqrt, len, T(0.0), T(1.0)); //r.uniform(in_ratio, len, T(0.0), T(1.0)); r.uniform(in_sign_flip, len, T(0.0), T(1.0)); naivePower(in_power, out_power_ref, len); power(in_power, len); naiveSqrt(in_sqrt, out_sqrt_ref, len); seqRoot(in_sqrt, len); ratio(in_ratio, in_ratio, 4); naiveSignFlip(in_sign_flip, out_sign_flip_ref, params.n_row, params.n_col); //signFlip(in_sign_flip, 
params.n_row, params.n_col); } void TearDown() override { CUDA_CHECK(cudaFree(in_power)); CUDA_CHECK(cudaFree(out_power_ref)); CUDA_CHECK(cudaFree(in_sqrt)); CUDA_CHECK(cudaFree(out_sqrt_ref)); CUDA_CHECK(cudaFree(in_ratio)); CUDA_CHECK(cudaFree(out_ratio_ref)); CUDA_CHECK(cudaFree(in_sign_flip)); CUDA_CHECK(cudaFree(out_sign_flip_ref)); } protected: MathInputs<T> params; T *in_power, *out_power_ref, *in_sqrt, *out_sqrt_ref, *in_ratio, *out_ratio_ref, *in_sign_flip, *out_sign_flip_ref; }; const std::vector<MathInputs<float> > inputsf = { {0.00001f, 1024, 1024, 1024*1024, 1234ULL} }; const std::vector<MathInputs<double> > inputsd = { {0.00001, 1024, 1024, 1024*1024, 1234ULL} }; typedef MathTest<float> MathPowerTestF; TEST_P(MathPowerTestF, Result) { ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathPowerTestD; TEST_P(MathPowerTestD, Result){ ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len, CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathSqrtTestF; TEST_P(MathSqrtTestF, Result) { ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathSqrtTestD; TEST_P(MathSqrtTestD, Result){ ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len, CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathRatioTestF; TEST_P(MathRatioTestF, Result) { ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathRatioTestD; TEST_P(MathRatioTestD, Result){ ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4, CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathSignFlipTestF; TEST_P(MathSignFlipTestF, Result) { ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathSignFlipTestD; TEST_P(MathSignFlipTestD, Result){ ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestD, ::testing::ValuesIn(inputsd)); } // end namespace LinAlg } // end namespace MLCommon
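// Hedged host-side restatement (illustration only) of what nativeSignFlipKernel above
// computes on column-major data, one block per column: if the entry with the largest
// absolute value in a column is negative, the whole column is negated; otherwise the
// column is copied unchanged. hostSignFlip is a hypothetical reference helper, not
// part of the library under test, and ties may be broken slightly differently.
#include <cmath>
template <typename T>
void hostSignFlip(const T* in, T* out, int rowCount, int colCount) {
  for (int c = 0; c < colCount; ++c) {
    const T* col = in + c * rowCount;
    int maxIdx = 0;
    for (int r = 1; r < rowCount; ++r)
      if (std::fabs(col[r]) > std::fabs(col[maxIdx])) maxIdx = r;   // arg-max of |col|
    T sign = (col[maxIdx] < T(0)) ? T(-1) : T(1);
    for (int r = 0; r < rowCount; ++r) out[c * rowCount + r] = sign * col[r];
  }
}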
01bd9f4208e46e4c900216ecadcb27a4c1adb6e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "GpuBigKernel.h" void initializeGPU(){ const int kb = 1024; const int mb = kb * kb; int num_devices, device; hipGetDeviceCount(&num_devices); if (num_devices > 1) { int max_multiprocessors = 0, max_device = 0; std::cout << "Deformable module for MEGAMOL" << std::endl << "=========" << std::endl << std::endl; std::cout << "CUDA version: v" << CUDART_VERSION << std::endl; std::cout << "Thrust version: v" << THRUST_MAJOR_VERSION << "." << THRUST_MINOR_VERSION << std::endl << std::endl; std::cout<<"CUDA Devices: " << std::endl << std::endl; for (device = 0; device < num_devices; device++) { hipDeviceProp_t props; hipGetDeviceProperties(&props, device); std::cout << device << ": " << props.name << ": " << props.major << "." << props.minor << std::endl; std::cout << " Global memory: " << props.totalGlobalMem / mb << "mb" << std::endl; std::cout << " Shared memory: " << props.sharedMemPerBlock / kb << "kb" << std::endl; std::cout << " Constant memory: " << props.totalConstMem / kb << "kb" << std::endl; std::cout << " Block registers: " << props.regsPerBlock << std::endl << std::endl; std::cout << " Warp size: " << props.warpSize << std::endl; std::cout << " Threads per block: " << props.maxThreadsPerBlock << std::endl; std::cout << " Max block dimensions: [ " << props.maxThreadsDim[0] << ", " << props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2] << " ]" << std::endl; std::cout << " Max grid dimensions: [ " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << " ]" << std::endl; std::cout << std::endl; if (max_multiprocessors < props.multiProcessorCount) { max_multiprocessors = props.multiProcessorCount; max_device = device; } } hipSetDevice(max_device); std::cout<<"Selected Device"<<max_device<<std::endl; } } void theBigLoop1(std::vector<float> &m,std::vector<float> &roh, std::vector<float> &proh,std::vector<float> &droh,std::vector<glm::mat3> &D, std::vector<float> &d,std::vector<float>&eta,std::vector<glm::vec3> &pos, std::vector<glm::vec3> &vel, std::vector<glm::mat3> &gradV,std::vector<glm::vec3> &acc, std::vector<glm::vec3> &pacc,std::vector<glm::mat3> &S,std::vector<glm::vec3> &gradS, std::vector<float> &P, std::vector<glm::vec3> &gradP,std::vector<glm::vec3> &gradW, std::vector<glm::vec3> &pie,std::vector<float> &W){ //streams can be used to copy multiple data structures to the device memeory. //hipStream_t s1; //hipStream_t s2; //hipStream_t s3; //hipStreamCreate(&s1); //hipStreamCreate(&s2); //hipStreamCreate(&s3); //Declare the device storage. std::vector<float> d_m; std::vector<float> d_roh; std::vector<float> d_proh; std::vector<float> d_droh; std::vector<glm::mat3> d_D; std::vector<float> d_d; std::vector<float>d_eta; std::vector<glm::vec3> d_pos, std::vector<glm::vec3> d_vel; std::vector<glm::mat3> d_gradV; std::vector<glm::vec3> d_acc; std::vector<glm::vec3> d_pacc; std::vector<glm::mat3> d_S; std::vector<glm::vec3> d_gradS; std::vector<float> d_P; std::vector<glm::vec3> d_gradP; std::vector<glm::vec3> d_gradW; std::vector<glm::vec3> d_pie; std::vector<float> d_W; //try making a init function where storage for constant //things like m are not transfered again and again. 
hipMalloc(&d_m,sizeof(m)); hipMemcpy(d_m, m,sizeof(m), hipMemcpyHostToDevice); //hipMemcpyAsync(d_elms_cm, elms_cm,size_elms_cm, hipMemcpyHostToDevice, s1); hipMalloc(&d_roh,sizeof(roh)); hipMemcpy(d_roh, roh, sizeof(roh), hipMemcpyHostToDevice); //hipMemcpyAsync(d_vertices_cm, vertices_cm, sizeof(vertices_cm), hipMemcpyHostToDevice, s2); hipMalloc(&d_proh,sizeof(proh)); hipMemcpy(d_proh, proh, sizeof(proh), hipMemcpyHostToDevice); //hipMemcpyAsync(d_out_elemInfo, out_elemInfo, sizeof(out_elemInfo), hipMemcpyHostToDevice, s3); hipMalloc(&d_droh,sizeof(droh)); hipMemcpy(d_droh, droh, sizeof(droh), hipMemcpyHostToDevice); hipMalloc(&d_D,sizeof(D)); hipMemcpy(d_D, D, sizeof(D), hipMemcpyHostToDevice); hipMalloc(&d_d,sizeof(d)); hipMemcpy(d_d, d, sizeof(d), hipMemcpyHostToDevice); hipMalloc(&d_eta,sizeof(eta)); hipMemcpy(d_eta, eta, sizeof(eta), hipMemcpyHostToDevice); hipMalloc(&d_pos,sizeof(pos)); hipMemcpy(d_pos, pos, sizeof(pos), hipMemcpyHostToDevice); hipMalloc(&d_vel,sizeof(vel)); hipMemcpy(d_vel, vel, sizeof(vel), hipMemcpyHostToDevice); hipMalloc(&d_gradV,sizeof(gradV)); hipMemcpy(d_gradV, gradV, sizeof(gradV), hipMemcpyHostToDevice); hipMalloc(&d_acc,sizeof(acc)); hipMemcpy(d_acc, acc, sizeof(acc), hipMemcpyHostToDevice); hipMalloc(&d_pacc,sizeof(pacc)); hipMemcpy(d_pacc, pacc, sizeof(pacc), hipMemcpyHostToDevice); hipMalloc(&d_S,sizeof(S)); hipMemcpy(d_S, S, sizeof(S), hipMemcpyHostToDevice); hipMalloc(&d_gradS,sizeof(gradS)); hipMemcpy(d_gradS, gradS, sizeof(gradS), hipMemcpyHostToDevice); hipMalloc(&d_P,sizeof(P)); hipMemcpy(d_P, P, sizeof(P), hipMemcpyHostToDevice); hipMalloc(&d_gradP,sizeof(gradP)); hipMemcpy(d_gradP, gradP, sizeof(gradP), hipMemcpyHostToDevice); hipMalloc(&d_gradW,sizeof(gradW)); hipMemcpy(d_gradW, gradW, sizeof(gradW), hipMemcpyHostToDevice); hipMalloc(&d_pie,sizeof(pie)); hipMemcpy(d_pie, pie, sizeof(pie), hipMemcpyHostToDevice); hipMalloc(&d_W,sizeof(W)); hipMemcpy(d_W, W, sizeof(W), hipMemcpyHostToDevice); int threadsPerBlock=128; int blocksPerGrid=(m.size()/threadsPerBlock) +1; hipLaunchKernelGGL(( computeTheBigLoop), dim3(blocksPerGrid),dim3(threadsPerBloack), 0, 0, std::vector<float> d_m,d_roh,d_proh,d_droh, d_D, d_d,d_eta,d_pos,d_vel,d_gradV,d_acc,d_pacc,d_S, d_gradS,d_P, d_gradP, d_gradW,d_pie,d_W;); } __global__ void computeTheBigLoop(std::vector<float> &m,std::vector<float> &d_roh,std::vector<float> &d_proh, std::vector<float> &d_droh,std::vector<glm::mat3> &d_D, std::vector<float> &d_d, std::vector<float>&d_eta,std::vector<glm::vec3> &d_pos, std::vector<glm::vec3> &d_vel, std::vector<glm::mat3> &d_gradV,std::vector<glm::vec3> &d_acc, std::vector<glm::vec3> &d_pacc,std::vector<glm::mat3> &d_S, std::vector<glm::vec3> &d_gradS,std::vector<float> &d_P, std::vector<glm::vec3> &d_gradP,std::vector<glm::vec3> &d_gradW, std::vector<glm::vec3> &d_pie,std::vector<float> &d_W){ dim3 index= blockIdx.x*blockDim.x+threadIdx.x; //call other kernels for this one // a kernel will be spawned for each of the material points. 
std::vector<glm::vec3> nxi; // if using dynamic programming then use the following //int threadsPerBlock=32; //int blocksPerGrid=(m.size()/threadsPerBlock) +1; //computeFindNeighbours<<<blocksPerGrid,threadsPerBloack>>>(i,d_pos,d_nxi,d_h); for(int i =0; i <pos.size();i++){ if(glm::distance(pos[index],pos[i])<2*h){ if(index!=i){ nxi.push_back(i); } } } for(int j=0;j<nxi.size();j++) { //Calculating W and gradW d_W[j]=((315/208)*3.14*h*h*h)* w(glm::distance(d_pos[i],d_pos[nxi[j]])/h); float dXiXj=glm::distance(d_pos[i],d_pos[nxi[j]]); float multiplier=-(9/(4*h))+(19/(8*h*h))*dXiXj+(5/(8*h*h*h))*dXiXj; d_gradW[j].x= multiplier*d_pos[i].x-d_pos[nxi[j]].x; d_gradW[j].y= multiplier*d_pos[i].y-d_pos[nxi[j]].y; d_gradW[j].z= multiplier*d_pos[i].y-d_pos[nxi[j]].z; //calculating gradV glm::vec3 v=(d_m[nxi[j]]/d_roh[nxi[j]])*(d_vel[nxi[j]]-d_vel[index]); d_gradV[index]=glm::outerProduct(v,d_gradW[j]); d_D[index]=d_gradV[index]+glm::transpose(d_gradV[index]); for(int i=0;i<d_D.size();i++){ glm::mat3 D1= 0.5*d_D[i]; d_droh[i] = D1[0][0]+D1[1][1]+D1[2][2]; } //used to calculate density outside the loop float sum=0; sum+=d_m[nxi[j]]*d_W[j]; } //calculating the density roh d_proh[index]=d_roh[index]; d_roh[index]=sum; //calculating pressure d_P[index]=c*c*(d_roh[index]-roh0); glm::mat3 D11= 0.5*d_D[index]; //compute the d from the current rate of deformation tensor float d1=std::sqrt(D11[0][0]+D11[1][1]+D11[2][2]); float exp=::exp(-d1*(jn+1)); // since we have fixed n to be .5 other wise use the commented version. d_eta[index]=(1-exp)*((1/d1)*(1/d1)); //eta[index]=(1-exp)*(::pow(d1,n-1)*(1/d1)); d_S[index]=d_eta[index]*d_D[index]; glm::vec3 sumGP(0.0f,0.0f,0.0f); float ptemp; glm::vec3 sumPI(0.0f,0.0f,0.0f); glm::vec3 sumGS(0.0f,0.0f,0.0f); for(int j=0;j<nxi.size();j++) { if(glm::dot((d_vel[index]-d_vel[nxi[j]]),(d_pos[index]-d_pos[nxi[j]]))<0) { float dist=glm::distance(d_pos[index],d_pos[nxi[j]]); float mu=h*glm::dot((d_vel[index]-d_vel[nxi[j]]),(d_pos[index]-d_pos[nxi[j]]))/(dist*dist+0.01*h*h); sumPI+=d_m[nxi[j]]*((2*alpha*c*(h*mu))/(d_roh[index]+d_roh[j]))*d_gradW[j]; } else sumPI+=0; //using the formulation in paper. glm::mat3 sv=(d_m[nxi[j]]/(d_roh[nxi[j]]*d_roh[index]))*(d_S[nxi[j]]+d_S[index]); sumGS+=sv*d_gradW[j]; ptemp=(d_m[nxi[j]])*((d_P[nxi[j]]/::pow(d_roh[nxi[j]],2))+d_P[index]/::pow(d_roh[index],2)); sumGP+=ptemp*d_gradW[j]; } d_gradP[index]=sumGP; d_gradS[index]=sumGS; d_pie[index]=sumPI; } void SPHSimulation::updateS(std::vector<float> &eta,std::vector<glm::mat3>D, std::vector<glm::mat3>&S){ for(int i=0;i<D.size();i++){ S[i]=eta[i]*D[i]; } __global__ void computeFindNeighbours(int index, std::vector<glm::vec3> &pos, std::vector<glm::vec3> &nxi)){ dim3 i= blockIdx.x*blockDim.x+threadIdx.x; if(glm::distance(pos[index],pos[i])<2*h){ if(index!=i){ nxi.push_back(i); } } } __global__ void calcGradW(W,gradW,pos,i,nxi){ } __global__ void updategradV(m,roh,vel,gradV,gradW,i,nxi){ } __global__ void updateD(gradV,D,i){ } __global__ void updateDDE(D,droh)//see equaqtion 10 { } __global__ void compute_roh(roh,proh,m,W,i,nxi){ } __global__ void updateP(P,roh0,roh,c) //see equaqtion 6 { } __global__ void updateEta(eta,D,jn,n) //see equaqtion 4 { } __global__ void updateS(eta,D,S){ } //for artificial viscosity equation 12 __global__ void compute_pi(m,roh,pie,h,gradW,i,nxi,vel,pos,alpha,c){ } // to update acceleration we need to calculate grad p and grad s __global__ void compute_gradS(roh,S,gradW,i,m,gradS,nxi){ } __global__ void compute_gradP(m,roh,P,gradW,gradP,i,nxi){ }
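// Hedged host-side sketch (illustration only) of the neighbour criterion that the
// per-thread loop in computeTheBigLoop above implements: particle j is a neighbour
// of particle i when the two are distinct and closer than twice the smoothing
// length. The smoothing length h is taken as a parameter here because the original
// file uses h (and w, c, roh0, jn, alpha) without defining them in this file.
#include <vector>
#include <glm/glm.hpp>
std::vector<int> findNeighboursHost(int i, const std::vector<glm::vec3>& pos, float h) {
  std::vector<int> nxi;
  for (int j = 0; j < (int)pos.size(); ++j)
    if (j != i && glm::distance(pos[i], pos[j]) < 2.0f * h)   // 2h support radius
      nxi.push_back(j);
  return nxi;
}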
01bd9f4208e46e4c900216ecadcb27a4c1adb6e4.cu
#include "GpuBigKernel.h" void initializeGPU(){ const int kb = 1024; const int mb = kb * kb; int num_devices, device; cudaGetDeviceCount(&num_devices); if (num_devices > 1) { int max_multiprocessors = 0, max_device = 0; std::cout << "Deformable module for MEGAMOL" << std::endl << "=========" << std::endl << std::endl; std::cout << "CUDA version: v" << CUDART_VERSION << std::endl; std::cout << "Thrust version: v" << THRUST_MAJOR_VERSION << "." << THRUST_MINOR_VERSION << std::endl << std::endl; std::cout<<"CUDA Devices: " << std::endl << std::endl; for (device = 0; device < num_devices; device++) { cudaDeviceProp props; cudaGetDeviceProperties(&props, device); std::cout << device << ": " << props.name << ": " << props.major << "." << props.minor << std::endl; std::cout << " Global memory: " << props.totalGlobalMem / mb << "mb" << std::endl; std::cout << " Shared memory: " << props.sharedMemPerBlock / kb << "kb" << std::endl; std::cout << " Constant memory: " << props.totalConstMem / kb << "kb" << std::endl; std::cout << " Block registers: " << props.regsPerBlock << std::endl << std::endl; std::cout << " Warp size: " << props.warpSize << std::endl; std::cout << " Threads per block: " << props.maxThreadsPerBlock << std::endl; std::cout << " Max block dimensions: [ " << props.maxThreadsDim[0] << ", " << props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2] << " ]" << std::endl; std::cout << " Max grid dimensions: [ " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << " ]" << std::endl; std::cout << std::endl; if (max_multiprocessors < props.multiProcessorCount) { max_multiprocessors = props.multiProcessorCount; max_device = device; } } cudaSetDevice(max_device); std::cout<<"Selected Device"<<max_device<<std::endl; } } void theBigLoop1(std::vector<float> &m,std::vector<float> &roh, std::vector<float> &proh,std::vector<float> &droh,std::vector<glm::mat3> &D, std::vector<float> &d,std::vector<float>&eta,std::vector<glm::vec3> &pos, std::vector<glm::vec3> &vel, std::vector<glm::mat3> &gradV,std::vector<glm::vec3> &acc, std::vector<glm::vec3> &pacc,std::vector<glm::mat3> &S,std::vector<glm::vec3> &gradS, std::vector<float> &P, std::vector<glm::vec3> &gradP,std::vector<glm::vec3> &gradW, std::vector<glm::vec3> &pie,std::vector<float> &W){ //streams can be used to copy multiple data structures to the device memeory. //cudaStream_t s1; //cudaStream_t s2; //cudaStream_t s3; //cudaStreamCreate(&s1); //cudaStreamCreate(&s2); //cudaStreamCreate(&s3); //Declare the device storage. std::vector<float> d_m; std::vector<float> d_roh; std::vector<float> d_proh; std::vector<float> d_droh; std::vector<glm::mat3> d_D; std::vector<float> d_d; std::vector<float>d_eta; std::vector<glm::vec3> d_pos, std::vector<glm::vec3> d_vel; std::vector<glm::mat3> d_gradV; std::vector<glm::vec3> d_acc; std::vector<glm::vec3> d_pacc; std::vector<glm::mat3> d_S; std::vector<glm::vec3> d_gradS; std::vector<float> d_P; std::vector<glm::vec3> d_gradP; std::vector<glm::vec3> d_gradW; std::vector<glm::vec3> d_pie; std::vector<float> d_W; //try making a init function where storage for constant //things like m are not transfered again and again. 
cudaMalloc(&d_m,sizeof(m)); cudaMemcpy(d_m, m,sizeof(m), cudaMemcpyHostToDevice); //cudaMemcpyAsync(d_elms_cm, elms_cm,size_elms_cm, cudaMemcpyHostToDevice, s1); cudaMalloc(&d_roh,sizeof(roh)); cudaMemcpy(d_roh, roh, sizeof(roh), cudaMemcpyHostToDevice); //cudaMemcpyAsync(d_vertices_cm, vertices_cm, sizeof(vertices_cm), cudaMemcpyHostToDevice, s2); cudaMalloc(&d_proh,sizeof(proh)); cudaMemcpy(d_proh, proh, sizeof(proh), cudaMemcpyHostToDevice); //cudaMemcpyAsync(d_out_elemInfo, out_elemInfo, sizeof(out_elemInfo), cudaMemcpyHostToDevice, s3); cudaMalloc(&d_droh,sizeof(droh)); cudaMemcpy(d_droh, droh, sizeof(droh), cudaMemcpyHostToDevice); cudaMalloc(&d_D,sizeof(D)); cudaMemcpy(d_D, D, sizeof(D), cudaMemcpyHostToDevice); cudaMalloc(&d_d,sizeof(d)); cudaMemcpy(d_d, d, sizeof(d), cudaMemcpyHostToDevice); cudaMalloc(&d_eta,sizeof(eta)); cudaMemcpy(d_eta, eta, sizeof(eta), cudaMemcpyHostToDevice); cudaMalloc(&d_pos,sizeof(pos)); cudaMemcpy(d_pos, pos, sizeof(pos), cudaMemcpyHostToDevice); cudaMalloc(&d_vel,sizeof(vel)); cudaMemcpy(d_vel, vel, sizeof(vel), cudaMemcpyHostToDevice); cudaMalloc(&d_gradV,sizeof(gradV)); cudaMemcpy(d_gradV, gradV, sizeof(gradV), cudaMemcpyHostToDevice); cudaMalloc(&d_acc,sizeof(acc)); cudaMemcpy(d_acc, acc, sizeof(acc), cudaMemcpyHostToDevice); cudaMalloc(&d_pacc,sizeof(pacc)); cudaMemcpy(d_pacc, pacc, sizeof(pacc), cudaMemcpyHostToDevice); cudaMalloc(&d_S,sizeof(S)); cudaMemcpy(d_S, S, sizeof(S), cudaMemcpyHostToDevice); cudaMalloc(&d_gradS,sizeof(gradS)); cudaMemcpy(d_gradS, gradS, sizeof(gradS), cudaMemcpyHostToDevice); cudaMalloc(&d_P,sizeof(P)); cudaMemcpy(d_P, P, sizeof(P), cudaMemcpyHostToDevice); cudaMalloc(&d_gradP,sizeof(gradP)); cudaMemcpy(d_gradP, gradP, sizeof(gradP), cudaMemcpyHostToDevice); cudaMalloc(&d_gradW,sizeof(gradW)); cudaMemcpy(d_gradW, gradW, sizeof(gradW), cudaMemcpyHostToDevice); cudaMalloc(&d_pie,sizeof(pie)); cudaMemcpy(d_pie, pie, sizeof(pie), cudaMemcpyHostToDevice); cudaMalloc(&d_W,sizeof(W)); cudaMemcpy(d_W, W, sizeof(W), cudaMemcpyHostToDevice); int threadsPerBlock=128; int blocksPerGrid=(m.size()/threadsPerBlock) +1; computeTheBigLoop<<<blocksPerGrid,threadsPerBloack>>>(std::vector<float> d_m,d_roh,d_proh,d_droh, d_D, d_d,d_eta,d_pos,d_vel,d_gradV,d_acc,d_pacc,d_S, d_gradS,d_P, d_gradP, d_gradW,d_pie,d_W;); } __global__ void computeTheBigLoop(std::vector<float> &m,std::vector<float> &d_roh,std::vector<float> &d_proh, std::vector<float> &d_droh,std::vector<glm::mat3> &d_D, std::vector<float> &d_d, std::vector<float>&d_eta,std::vector<glm::vec3> &d_pos, std::vector<glm::vec3> &d_vel, std::vector<glm::mat3> &d_gradV,std::vector<glm::vec3> &d_acc, std::vector<glm::vec3> &d_pacc,std::vector<glm::mat3> &d_S, std::vector<glm::vec3> &d_gradS,std::vector<float> &d_P, std::vector<glm::vec3> &d_gradP,std::vector<glm::vec3> &d_gradW, std::vector<glm::vec3> &d_pie,std::vector<float> &d_W){ dim3 index= blockIdx.x*blockDim.x+threadIdx.x; //call other kernels for this one // a kernel will be spawned for each of the material points. 
std::vector<glm::vec3> nxi; // if using dynamic programming then use the following //int threadsPerBlock=32; //int blocksPerGrid=(m.size()/threadsPerBlock) +1; //computeFindNeighbours<<<blocksPerGrid,threadsPerBloack>>>(i,d_pos,d_nxi,d_h); for(int i =0; i <pos.size();i++){ if(glm::distance(pos[index],pos[i])<2*h){ if(index!=i){ nxi.push_back(i); } } } for(int j=0;j<nxi.size();j++) { //Calculating W and gradW d_W[j]=((315/208)*3.14*h*h*h)* w(glm::distance(d_pos[i],d_pos[nxi[j]])/h); float dXiXj=glm::distance(d_pos[i],d_pos[nxi[j]]); float multiplier=-(9/(4*h))+(19/(8*h*h))*dXiXj+(5/(8*h*h*h))*dXiXj; d_gradW[j].x= multiplier*d_pos[i].x-d_pos[nxi[j]].x; d_gradW[j].y= multiplier*d_pos[i].y-d_pos[nxi[j]].y; d_gradW[j].z= multiplier*d_pos[i].y-d_pos[nxi[j]].z; //calculating gradV glm::vec3 v=(d_m[nxi[j]]/d_roh[nxi[j]])*(d_vel[nxi[j]]-d_vel[index]); d_gradV[index]=glm::outerProduct(v,d_gradW[j]); d_D[index]=d_gradV[index]+glm::transpose(d_gradV[index]); for(int i=0;i<d_D.size();i++){ glm::mat3 D1= 0.5*d_D[i]; d_droh[i] = D1[0][0]+D1[1][1]+D1[2][2]; } //used to calculate density outside the loop float sum=0; sum+=d_m[nxi[j]]*d_W[j]; } //calculating the density roh d_proh[index]=d_roh[index]; d_roh[index]=sum; //calculating pressure d_P[index]=c*c*(d_roh[index]-roh0); glm::mat3 D11= 0.5*d_D[index]; //compute the d from the current rate of deformation tensor float d1=std::sqrt(D11[0][0]+D11[1][1]+D11[2][2]); float exp=std::exp(-d1*(jn+1)); // since we have fixed n to be .5 other wise use the commented version. d_eta[index]=(1-exp)*((1/d1)*(1/d1)); //eta[index]=(1-exp)*(std::pow(d1,n-1)*(1/d1)); d_S[index]=d_eta[index]*d_D[index]; glm::vec3 sumGP(0.0f,0.0f,0.0f); float ptemp; glm::vec3 sumPI(0.0f,0.0f,0.0f); glm::vec3 sumGS(0.0f,0.0f,0.0f); for(int j=0;j<nxi.size();j++) { if(glm::dot((d_vel[index]-d_vel[nxi[j]]),(d_pos[index]-d_pos[nxi[j]]))<0) { float dist=glm::distance(d_pos[index],d_pos[nxi[j]]); float mu=h*glm::dot((d_vel[index]-d_vel[nxi[j]]),(d_pos[index]-d_pos[nxi[j]]))/(dist*dist+0.01*h*h); sumPI+=d_m[nxi[j]]*((2*alpha*c*(h*mu))/(d_roh[index]+d_roh[j]))*d_gradW[j]; } else sumPI+=0; //using the formulation in paper. glm::mat3 sv=(d_m[nxi[j]]/(d_roh[nxi[j]]*d_roh[index]))*(d_S[nxi[j]]+d_S[index]); sumGS+=sv*d_gradW[j]; ptemp=(d_m[nxi[j]])*((d_P[nxi[j]]/std::pow(d_roh[nxi[j]],2))+d_P[index]/std::pow(d_roh[index],2)); sumGP+=ptemp*d_gradW[j]; } d_gradP[index]=sumGP; d_gradS[index]=sumGS; d_pie[index]=sumPI; } void SPHSimulation::updateS(std::vector<float> &eta,std::vector<glm::mat3>D, std::vector<glm::mat3>&S){ for(int i=0;i<D.size();i++){ S[i]=eta[i]*D[i]; } __global__ void computeFindNeighbours(int index, std::vector<glm::vec3> &pos, std::vector<glm::vec3> &nxi)){ dim3 i= blockIdx.x*blockDim.x+threadIdx.x; if(glm::distance(pos[index],pos[i])<2*h){ if(index!=i){ nxi.push_back(i); } } } __global__ void calcGradW(W,gradW,pos,i,nxi){ } __global__ void updategradV(m,roh,vel,gradV,gradW,i,nxi){ } __global__ void updateD(gradV,D,i){ } __global__ void updateDDE(D,droh)//see equaqtion 10 { } __global__ void compute_roh(roh,proh,m,W,i,nxi){ } __global__ void updateP(P,roh0,roh,c) //see equaqtion 6 { } __global__ void updateEta(eta,D,jn,n) //see equaqtion 4 { } __global__ void updateS(eta,D,S){ } //for artificial viscosity equation 12 __global__ void compute_pi(m,roh,pie,h,gradW,i,nxi,vel,pos,alpha,c){ } // to update acceleration we need to calculate grad p and grad s __global__ void compute_gradS(roh,S,gradW,i,m,gradS,nxi){ } __global__ void compute_gradP(m,roh,P,gradW,gradP,i,nxi){ }
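// Hedged sketch of the host-to-device transfer pattern theBigLoop1 above appears to
// intend. As written, cudaMalloc/cudaMemcpy are handed std::vector objects and
// sizeof(vector), which is the size of the vector object itself rather than of its
// elements, so the payload is never allocated or copied, and std::vector cannot be
// used inside __global__ code anyway. toDeviceFloat is a hypothetical helper showing
// the usual raw-pointer pattern; kernels would then take float* plus an element count.
#include <cuda_runtime.h>
#include <vector>
float* toDeviceFloat(const std::vector<float>& host) {
  float* dev = nullptr;
  size_t bytes = host.size() * sizeof(float);              // payload size in bytes
  cudaMalloc((void**)&dev, bytes);
  cudaMemcpy(dev, host.data(), bytes, cudaMemcpyHostToDevice);
  return dev;                                              // caller releases with cudaFree
}
// e.g. float* d_m = toDeviceFloat(m); float* d_roh = toDeviceFloat(roh); ...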
cfd8fe8224fc013876addc1a4f9a4935cdfe084b.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************************** Source Code : SOAvsAOS.cu Objective : Example code to demonstrate the advantage of having Stucture of arrays rather than array of structures in the application while representing data the corresponding advantages in terms of the bandwidth of the global memory that is achievable Description : This example takes "Triangle" as structure with three arrays of three floating points each representing the three vertices of a triangle the same information is also is stored using a structure "Triangles" which has arrays for each field of each vertex both the representations are intialised generating typical access patterns that will be present while accessing those structures Input : none Output : The different bandwidths that are achieved while accessing data from different data representations Modified : Aug 2011 Author : RarchK ****************************************************************************************/ #include <stdio.h> #include <hip/hip_runtime.h> #include <float.h> #define NO_OF_TRIANGLES 1000000 #define BLOCK_SIZE 128 #define NO_OF_PATTERNS 2 #define NTIMES 10 #define MIN(x,y) ((x)<(y)?(x):(y)) #define MAX(x,y) ((x)>(y)?(x):(y)) void printResults(void); // Triangle structure -- which is the structure in "Array of Structures" struct Triangle { float A[3]; float B[3]; float C[3]; }; // Triangles structure -- which is the structure in "Structure of Arrays" struct Triangles { float *Ax, *Ay, *Az; float *Bx, *By, *Bz; float *Cx, *Cy, *Cz; }; ///////////////////////////////////////////////////////////////////////////////////// // // initializing the array of Triangle types. // each element is intialized to -- (0,0,0),(1,1,1),(2,2,2) // //////////////////////////////////////////////////////////////////////////////////// __global__ void setTriangles(Triangle *myArrayOfTriangles) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx < NO_OF_TRIANGLES) { myArrayOfTriangles[idx].A[0] = 0; myArrayOfTriangles[idx].A[1] = 0; myArrayOfTriangles[idx].A[2] = 0; myArrayOfTriangles[idx].B[0] = 1; myArrayOfTriangles[idx].B[1] = 1; myArrayOfTriangles[idx].B[2] = 1; myArrayOfTriangles[idx].C[0] = 2; myArrayOfTriangles[idx].C[1] = 2; myArrayOfTriangles[idx].C[2] = 2; } } ///////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////// // // initializing the Triangles structure // each triangle's vertices are initialized to -- (0,0,0),(1,1,1),(2,2,2) // //////////////////////////////////////////////////////////////////////////////////// __global__ void setTriangles2(Triangles myTrianglesStructure) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx < NO_OF_TRIANGLES) { myTrianglesStructure.Ax[idx] = 0; myTrianglesStructure.Ay[idx] = 0; myTrianglesStructure.Az[idx] = 0; myTrianglesStructure.Bx[idx] = 1; myTrianglesStructure.By[idx] = 1; myTrianglesStructure.Bz[idx] = 1; myTrianglesStructure.Cx[idx] = 2; myTrianglesStructure.Cy[idx] = 2; myTrianglesStructure.Cz[idx] = 2; } } //////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////// // // checking for the equivalence of both the initialising kernals // ////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ 
void Test(Triangle *myArrayOfTriangles, Triangles myTrianglesStructure, int* dCorrectBool) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx < NO_OF_TRIANGLES) if(myTrianglesStructure.Ax[idx] != myArrayOfTriangles[idx].A[0] || myTrianglesStructure.Ay[idx] != myArrayOfTriangles[idx].A[1] || myTrianglesStructure.Az[idx] != myArrayOfTriangles[idx].A[2] || myTrianglesStructure.Bx[idx] != myArrayOfTriangles[idx].B[0] || myTrianglesStructure.By[idx] != myArrayOfTriangles[idx].B[1] || myTrianglesStructure.Bz[idx] != myArrayOfTriangles[idx].B[2] || myTrianglesStructure.Cx[idx] != myArrayOfTriangles[idx].C[0] || myTrianglesStructure.Cy[idx] != myArrayOfTriangles[idx].C[1] || myTrianglesStructure.Cz[idx] != myArrayOfTriangles[idx].C[2] ) *dCorrectBool = 0; } //////////////////////////////////////////////////////////////////////////////////////////////////////////// static double avgtime[NO_OF_PATTERNS] = {0}, maxtime[NO_OF_PATTERNS] = {0}, mintime[NO_OF_PATTERNS]; static float bandWidths[NO_OF_PATTERNS] = {0}; static double bytes = sizeof(float) * NO_OF_TRIANGLES * 9; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // the main routene // for timing the different kernals which are accessing array of structures and structure of arrays // finding the bandwidths // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char* argv[]) { Triangle *myArrayOfTriangles; // Array of Structures -- array of struct Triangle type Triangles myTrianglesStructure; // Structure of Arrays int* dCorrectBool; // on the device variable int hCorrectBool = 1; float elapsedTimes[NO_OF_PATTERNS][NTIMES]; hipEvent_t start,stop; hipError_t err = hipSuccess; // event creation, which will be used for timing the code hipEventCreate(&start); hipEventCreate(&stop); // memory allocation on the device err = hipMalloc((void **)&myArrayOfTriangles,sizeof(Triangle)*NO_OF_TRIANGLES); if(err == hipErrorMemoryAllocation) { printf("error in device memory allocation for - myArrayOfTriangles\n exiting out of the program.....\n"); exit(-1); } err = hipMalloc((void **)&myTrianglesStructure.Ax,sizeof(float)*NO_OF_TRIANGLES); if(err == hipErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.Ax\n exiting out of the program.....\n"); exit(-1); } err = hipMalloc((void **)&myTrianglesStructure.Ay,sizeof(float)*NO_OF_TRIANGLES); if(err == hipErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.Ay\n exiting out of the program.....\n"); exit(-1); } err = hipMalloc((void **)&myTrianglesStructure.Az,sizeof(float)*NO_OF_TRIANGLES); if(err == hipErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.Az\n exiting out of the program.....\n"); exit(-1); } err = hipMalloc((void **)&myTrianglesStructure.Bx,sizeof(float)*NO_OF_TRIANGLES); if(err == hipErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.Bx\n exiting out of the program.....\n"); exit(-1); } err = hipMalloc((void **)&myTrianglesStructure.By,sizeof(float)*NO_OF_TRIANGLES); if(err == hipErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.By\n exiting out of the program.....\n"); exit(-1); } err = hipMalloc((void **)&myTrianglesStructure.Bz,sizeof(float)*NO_OF_TRIANGLES); if(err == hipErrorMemoryAllocation) { 
printf("error in device memory allocation for - myTrianglesStructure.Bz\n exiting out of the program.....\n"); exit(-1); } err = hipMalloc((void **)&myTrianglesStructure.Cx,sizeof(float)*NO_OF_TRIANGLES); if(err == hipErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.Cx\n exiting out of the program.....\n"); exit(-1); } err = hipMalloc((void **)&myTrianglesStructure.Cy,sizeof(float)*NO_OF_TRIANGLES); if(err == hipErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.Cy\n exiting out of the program.....\n"); exit(-1); } err = hipMalloc((void **)&myTrianglesStructure.Cz,sizeof(float)*NO_OF_TRIANGLES); if(err == hipErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.Cz\n exiting out of the program.....\n"); exit(-1); } err = hipMalloc((void**)&dCorrectBool, sizeof(int)); if(err == hipErrorMemoryAllocation) { printf("error in device memory allocation for dCorrectBool\n exiting out of the program.....\n"); exit(-1); } // copying hCorrectBool into dCorrectBool hipMemcpy(dCorrectBool,&hCorrectBool,sizeof(int),hipMemcpyHostToDevice); //finding the 1D grid size int gridSize = NO_OF_TRIANGLES/BLOCK_SIZE; if( NO_OF_TRIANGLES % BLOCK_SIZE != 0 ) gridSize += 1; // running each pattern NTIMES for(int k=0; k < NTIMES; k++) { // timing the kernels corresponding to different access patterns // PATTERN 1 hipEventRecord(start,0); hipLaunchKernelGGL(( setTriangles) , dim3(gridSize) , dim3(BLOCK_SIZE) , 0, 0, myArrayOfTriangles); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTimes[0][k],start,stop); // PATTERN 2 hipEventRecord(start,0); hipLaunchKernelGGL(( setTriangles2) , dim3(gridSize) , dim3(BLOCK_SIZE) , 0, 0, myTrianglesStructure); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTimes[1][k],start,stop); } // end of the for loop involving NTIMES hipLaunchKernelGGL(( Test) , dim3(gridSize), dim3(BLOCK_SIZE) , 0, 0, myArrayOfTriangles,myTrianglesStructure,dCorrectBool); // testing the equivalence of both the intializing kernals // copying back the value of dCorrectBool into hCorrectBool hipMemcpy(&hCorrectBool,dCorrectBool,sizeof(int),hipMemcpyDeviceToHost); if(hCorrectBool != 1) // if the kernels are not equivalent { printf("the kernel executions are not equivalent\n"); printf("exiting out of the program\n"); } else // if kernals are equivalent { // intializing the mintime array for(int i=0; i < NO_OF_PATTERNS;i++) mintime[i] = FLT_MAX; for (int k=1; k < NTIMES; k++) // skiping the first iteration { for (int i=0; i < NO_OF_PATTERNS; i++) { avgtime[i] = avgtime[i] + elapsedTimes[i][k]; mintime[i] = MIN(mintime[i],elapsedTimes[i][k]); maxtime[i] = MAX(maxtime[i], elapsedTimes[i][k]); } } // calculation of the different band widths that are achieved by different access patterns for(int i=0; i < NO_OF_PATTERNS; i++) { avgtime[i] = avgtime[i]/(double)(NTIMES-1); // finding the average time bandWidths[i] = bytes/mintime[i]; } printResults(); printf("\n\n**** successful termination of the program ****\n\n"); } //destroying the events hipEventDestroy(start); hipEventDestroy(stop); //freeing the allocated memory on the device hipFree(myArrayOfTriangles); hipFree(myTrianglesStructure.Ax); hipFree(myTrianglesStructure.Ay); hipFree(myTrianglesStructure.Az); hipFree(myTrianglesStructure.Bx); hipFree(myTrianglesStructure.By); hipFree(myTrianglesStructure.Bz); hipFree(myTrianglesStructure.Cx); hipFree(myTrianglesStructure.Cy); 
hipFree(myTrianglesStructure.Cz); return 0; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // prints the results containing the minimum, maximum and average times taken by the two initializing kernels // and the associated maximum bandwidth of global memory achieved by the kernels // /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void printResults() { printf("Demonstrating the advantages of Structure of arrays over Array of structures in data representation\n"); printf("The transferred data size (Bytes): %zu\n",NO_OF_TRIANGLES*sizeof(float)*9); printf("\n-------------------------------------------------------------------------------------------------------------------------------\n"); printf("Pattern \t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)\n"); printf("-------------------------------------------------------------------------------------------------------------------------------\n"); // printing the results for different access patterns for(int i=0; i < NO_OF_PATTERNS; i++) { switch(i) { case 0: printf("Array of Structures"); break; case 1: printf("Structure of Arrays"); break; } printf("\t %.6f \t\t %f \t\t %f \t\t %f\n",bandWidths[i]/1000000,avgtime[i],mintime[i],maxtime[i]); } printf("\n ------------------------------------------------------------------------------------------------------------------------------\n"); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
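// Hedged note on the units used in printResults above: 'bytes' is a plain byte count
// and 'mintime' comes from hipEventElapsedTime, which reports milliseconds, so
// bytes/mintime is in bytes per millisecond (1e3 bytes/s). Dividing that value by
// 1e6 when printing therefore yields 1e9 bytes/s, i.e. the decimal GB/sec announced
// in the table header. The same computation written out explicitly:
double bandwidthGBperSec(double bytes, double milliseconds) {
  double seconds = milliseconds * 1e-3;   // ms -> s
  return (bytes / seconds) / 1e9;         // bytes/s -> GB/s
}
// bandwidthGBperSec(bytes, mintime[i]) equals the printed bandWidths[i]/1000000.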
cfd8fe8224fc013876addc1a4f9a4935cdfe084b.cu
/****************************************************************************************** Source Code : SOAvsAOS.cu Objective : Example code to demonstrate the advantage of having Stucture of arrays rather than array of structures in the application while representing data the corresponding advantages in terms of the bandwidth of the global memory that is achievable Description : This example takes "Triangle" as structure with three arrays of three floating points each representing the three vertices of a triangle the same information is also is stored using a structure "Triangles" which has arrays for each field of each vertex both the representations are intialised generating typical access patterns that will be present while accessing those structures Input : none Output : The different bandwidths that are achieved while accessing data from different data representations Modified : Aug 2011 Author : RarchK ****************************************************************************************/ #include <stdio.h> #include <cuda.h> #include <float.h> #define NO_OF_TRIANGLES 1000000 #define BLOCK_SIZE 128 #define NO_OF_PATTERNS 2 #define NTIMES 10 #define MIN(x,y) ((x)<(y)?(x):(y)) #define MAX(x,y) ((x)>(y)?(x):(y)) void printResults(void); // Triangle structure -- which is the structure in "Array of Structures" struct Triangle { float A[3]; float B[3]; float C[3]; }; // Triangles structure -- which is the structure in "Structure of Arrays" struct Triangles { float *Ax, *Ay, *Az; float *Bx, *By, *Bz; float *Cx, *Cy, *Cz; }; ///////////////////////////////////////////////////////////////////////////////////// // // initializing the array of Triangle types. // each element is intialized to -- (0,0,0),(1,1,1),(2,2,2) // //////////////////////////////////////////////////////////////////////////////////// __global__ void setTriangles(Triangle *myArrayOfTriangles) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx < NO_OF_TRIANGLES) { myArrayOfTriangles[idx].A[0] = 0; myArrayOfTriangles[idx].A[1] = 0; myArrayOfTriangles[idx].A[2] = 0; myArrayOfTriangles[idx].B[0] = 1; myArrayOfTriangles[idx].B[1] = 1; myArrayOfTriangles[idx].B[2] = 1; myArrayOfTriangles[idx].C[0] = 2; myArrayOfTriangles[idx].C[1] = 2; myArrayOfTriangles[idx].C[2] = 2; } } ///////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////// // // initializing the Triangles structure // each triangle's vertices are initialized to -- (0,0,0),(1,1,1),(2,2,2) // //////////////////////////////////////////////////////////////////////////////////// __global__ void setTriangles2(Triangles myTrianglesStructure) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx < NO_OF_TRIANGLES) { myTrianglesStructure.Ax[idx] = 0; myTrianglesStructure.Ay[idx] = 0; myTrianglesStructure.Az[idx] = 0; myTrianglesStructure.Bx[idx] = 1; myTrianglesStructure.By[idx] = 1; myTrianglesStructure.Bz[idx] = 1; myTrianglesStructure.Cx[idx] = 2; myTrianglesStructure.Cy[idx] = 2; myTrianglesStructure.Cz[idx] = 2; } } //////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////// // // checking for the equivalence of both the initialising kernals // ////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void Test(Triangle *myArrayOfTriangles, Triangles myTrianglesStructure, 
int* dCorrectBool) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx < NO_OF_TRIANGLES) if(myTrianglesStructure.Ax[idx] != myArrayOfTriangles[idx].A[0] || myTrianglesStructure.Ay[idx] != myArrayOfTriangles[idx].A[1] || myTrianglesStructure.Az[idx] != myArrayOfTriangles[idx].A[2] || myTrianglesStructure.Bx[idx] != myArrayOfTriangles[idx].B[0] || myTrianglesStructure.By[idx] != myArrayOfTriangles[idx].B[1] || myTrianglesStructure.Bz[idx] != myArrayOfTriangles[idx].B[2] || myTrianglesStructure.Cx[idx] != myArrayOfTriangles[idx].C[0] || myTrianglesStructure.Cy[idx] != myArrayOfTriangles[idx].C[1] || myTrianglesStructure.Cz[idx] != myArrayOfTriangles[idx].C[2] ) *dCorrectBool = 0; } //////////////////////////////////////////////////////////////////////////////////////////////////////////// static double avgtime[NO_OF_PATTERNS] = {0}, maxtime[NO_OF_PATTERNS] = {0}, mintime[NO_OF_PATTERNS]; static float bandWidths[NO_OF_PATTERNS] = {0}; static double bytes = sizeof(float) * NO_OF_TRIANGLES * 9; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // the main routene // for timing the different kernals which are accessing array of structures and structure of arrays // finding the bandwidths // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char* argv[]) { Triangle *myArrayOfTriangles; // Array of Structures -- array of struct Triangle type Triangles myTrianglesStructure; // Structure of Arrays int* dCorrectBool; // on the device variable int hCorrectBool = 1; float elapsedTimes[NO_OF_PATTERNS][NTIMES]; cudaEvent_t start,stop; cudaError_t err = cudaSuccess; // event creation, which will be used for timing the code cudaEventCreate(&start); cudaEventCreate(&stop); // memory allocation on the device err = cudaMalloc((void **)&myArrayOfTriangles,sizeof(Triangle)*NO_OF_TRIANGLES); if(err == cudaErrorMemoryAllocation) { printf("error in device memory allocation for - myArrayOfTriangles\n exiting out of the program.....\n"); exit(-1); } err = cudaMalloc((void **)&myTrianglesStructure.Ax,sizeof(float)*NO_OF_TRIANGLES); if(err == cudaErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.Ax\n exiting out of the program.....\n"); exit(-1); } err = cudaMalloc((void **)&myTrianglesStructure.Ay,sizeof(float)*NO_OF_TRIANGLES); if(err == cudaErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.Ay\n exiting out of the program.....\n"); exit(-1); } err = cudaMalloc((void **)&myTrianglesStructure.Az,sizeof(float)*NO_OF_TRIANGLES); if(err == cudaErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.Az\n exiting out of the program.....\n"); exit(-1); } err = cudaMalloc((void **)&myTrianglesStructure.Bx,sizeof(float)*NO_OF_TRIANGLES); if(err == cudaErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.Bx\n exiting out of the program.....\n"); exit(-1); } err = cudaMalloc((void **)&myTrianglesStructure.By,sizeof(float)*NO_OF_TRIANGLES); if(err == cudaErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.By\n exiting out of the program.....\n"); exit(-1); } err = cudaMalloc((void **)&myTrianglesStructure.Bz,sizeof(float)*NO_OF_TRIANGLES); if(err == cudaErrorMemoryAllocation) { printf("error in device memory allocation for - 
myTrianglesStructure.Bz\n exiting out of the program.....\n"); exit(-1); } err = cudaMalloc((void **)&myTrianglesStructure.Cx,sizeof(float)*NO_OF_TRIANGLES); if(err == cudaErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.Cx\n exiting out of the program.....\n"); exit(-1); } err = cudaMalloc((void **)&myTrianglesStructure.Cy,sizeof(float)*NO_OF_TRIANGLES); if(err == cudaErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.Cy\n exiting out of the program.....\n"); exit(-1); } err = cudaMalloc((void **)&myTrianglesStructure.Cz,sizeof(float)*NO_OF_TRIANGLES); if(err == cudaErrorMemoryAllocation) { printf("error in device memory allocation for - myTrianglesStructure.Cz\n exiting out of the program.....\n"); exit(-1); } err = cudaMalloc((void**)&dCorrectBool, sizeof(int)); if(err == cudaErrorMemoryAllocation) { printf("error in device memory allocation for dCorrectBool\n exiting out of the program.....\n"); exit(-1); } // copying hCorrectBool into dCorrectBool cudaMemcpy(dCorrectBool,&hCorrectBool,sizeof(int),cudaMemcpyHostToDevice); //finding the 1D grid size int gridSize = NO_OF_TRIANGLES/BLOCK_SIZE; if( NO_OF_TRIANGLES % BLOCK_SIZE != 0 ) gridSize += 1; // running each pattern NTIMES for(int k=0; k < NTIMES; k++) { // timing the kernels corresponding to different access patterns // PATTERN 1 cudaEventRecord(start,0); setTriangles <<< gridSize , BLOCK_SIZE >>> (myArrayOfTriangles); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTimes[0][k],start,stop); // PATTERN 2 cudaEventRecord(start,0); setTriangles2 <<< gridSize , BLOCK_SIZE >>> (myTrianglesStructure); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTimes[1][k],start,stop); } // end of the for loop involving NTIMES Test <<< gridSize, BLOCK_SIZE >>> (myArrayOfTriangles,myTrianglesStructure,dCorrectBool); // testing the equivalence of both the intializing kernals // copying back the value of dCorrectBool into hCorrectBool cudaMemcpy(&hCorrectBool,dCorrectBool,sizeof(int),cudaMemcpyDeviceToHost); if(hCorrectBool != 1) // if the kernels are not equivalent { printf("the kernel executions are not equivalent\n"); printf("exiting out of the program\n"); } else // if kernals are equivalent { // intializing the mintime array for(int i=0; i < NO_OF_PATTERNS;i++) mintime[i] = FLT_MAX; for (int k=1; k < NTIMES; k++) // skiping the first iteration { for (int i=0; i < NO_OF_PATTERNS; i++) { avgtime[i] = avgtime[i] + elapsedTimes[i][k]; mintime[i] = MIN(mintime[i],elapsedTimes[i][k]); maxtime[i] = MAX(maxtime[i], elapsedTimes[i][k]); } } // calculation of the different band widths that are achieved by different access patterns for(int i=0; i < NO_OF_PATTERNS; i++) { avgtime[i] = avgtime[i]/(double)(NTIMES-1); // finding the average time bandWidths[i] = bytes/mintime[i]; } printResults(); printf("\n\n**** successful termination of the program ****\n\n"); } //destroying the events cudaEventDestroy(start); cudaEventDestroy(stop); //freeing the allocated memory on the device cudaFree(myArrayOfTriangles); cudaFree(myTrianglesStructure.Ax); cudaFree(myTrianglesStructure.Ay); cudaFree(myTrianglesStructure.Az); cudaFree(myTrianglesStructure.Bx); cudaFree(myTrianglesStructure.By); cudaFree(myTrianglesStructure.Bz); cudaFree(myTrianglesStructure.Cx); cudaFree(myTrianglesStructure.Cy); cudaFree(myTrianglesStructure.Cz); return 0; } 
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // prints the results containing the minimum, maximum and average times taken by the two initializing kernels // and the associated maximum bandwidth of global memory achieved by the kernels // /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void printResults() { printf("Demonstrating the advantages of Structure of arrays over Array of structures in data representation\n"); printf("The transferred data size (Bytes): %zu\n",NO_OF_TRIANGLES*sizeof(float)*9); printf("\n-------------------------------------------------------------------------------------------------------------------------------\n"); printf("Pattern \t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)\n"); printf("-------------------------------------------------------------------------------------------------------------------------------\n"); // printing the results for different access patterns for(int i=0; i < NO_OF_PATTERNS; i++) { switch(i) { case 0: printf("Array of Structures"); break; case 1: printf("Structure of Arrays"); break; } printf("\t %.6f \t\t %f \t\t %f \t\t %f\n",bandWidths[i]/1000000,avgtime[i],mintime[i],maxtime[i]); } printf("\n ------------------------------------------------------------------------------------------------------------------------------\n"); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
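// Hedged aside on the error handling above: each allocation is only compared against
// cudaErrorMemoryAllocation, so any other failure (invalid device, a sticky error from
// an earlier call, ...) would pass unnoticed. A common, more general pattern checks
// every runtime call against cudaSuccess; CHECK_CUDA below is a hypothetical helper,
// not something the original program defines.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#define CHECK_CUDA(call)                                                  \
  do {                                                                    \
    cudaError_t e_ = (call);                                              \
    if (e_ != cudaSuccess) {                                              \
      fprintf(stderr, "CUDA error '%s' at %s:%d\n",                       \
              cudaGetErrorString(e_), __FILE__, __LINE__);                \
      exit(EXIT_FAILURE);                                                 \
    }                                                                     \
  } while (0)
// e.g. CHECK_CUDA(cudaMalloc((void**)&dCorrectBool, sizeof(int)));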
96886500dff9177513fc7f0bccff908f96769aeb.hip
// !!! This is a file automatically generated by hipify!!! // // Copyright 2004-present Facebook. All Rights Reserved. // #include <stdlib.h> #include <assert.h> #include <iterator> #include <vector> #include <chrono> #include <hip/hip_runtime.h> using namespace std; #define WARP_SIZE 32 #define MASK 0xffffffff /// A simple pair type for CUDA device usage template <typename K, typename V> struct Pair { __host__ __device__ __forceinline__ Pair() {} __host__ __device__ __forceinline__ Pair(K key, V value) : k(key), v(value) {} __host__ __device__ __forceinline__ bool operator==(const Pair<K, V>& rhs) const { return (k == rhs.k) && (v == rhs.v); } __host__ __device__ __forceinline__ bool operator!=(const Pair<K, V>& rhs) const { return !operator==(rhs); } __host__ __device__ __forceinline__ bool operator<(const Pair<K, V>& rhs) const { return (k < rhs.k) || ((k == rhs.k) && (v < rhs.v)); } __host__ __device__ __forceinline__ bool operator>(const Pair<K, V>& rhs) const { return (k > rhs.k) || ((k == rhs.k) && (v > rhs.v)); } K k; V v; }; /** Extract a single bit at `pos` from `val` */ __device__ __forceinline__ int getBit(int val, int pos) { return (val >> pos) & 0x1; } /** Return the current thread's lane in the warp */ __device__ __forceinline__ int getLaneId() { return threadIdx.x % WARP_SIZE; } template <typename T> struct GreaterThan { static __device__ __forceinline__ bool compare(const T lhs, const T rhs) { return (lhs > rhs); } }; template <typename T> struct LessThan { static __device__ __forceinline__ bool compare(const T lhs, const T rhs) { return (lhs < rhs); } }; template <typename T> __device__ __forceinline__ T shfl_xor(const T val, int laneMask, int width = WARP_SIZE) { return __shfl_xor_sync(MASK, val, laneMask, width); } template <typename K, typename V> __device__ __forceinline__ Pair<K, V> shfl_xor(const Pair<K, V>& p, int laneMask, int width = WARP_SIZE) { return Pair<K, V>(__shfl_xor_sync(MASK, p.k, laneMask, width), __shfl_xor_sync(MASK, p.v, laneMask, width)); } template <typename T, typename Comparator> __device__ __forceinline__ T shflSwap(const T x, int mask, int dir) { T y = shfl_xor(x, mask); return Comparator::compare(x, y) == dir ? y : x; } /// Defines a bitonic sort network to exchange 'V' according to /// `SWAP()`'s compare and exchange mechanism across the warp, ordered /// according to the comparator `comp`. 
In other words, if `comp` is /// `GreaterThan<T>`, then lane 0 will contain the highest `val` /// presented across the warp /// /// See also /// http://on-demand.gputechconf.com/gtc/2013/presentations/S3174-Kepler-Shuffle-Tips-Tricks.pdf template <typename T, typename Comparator> __device__ T warpBitonicSort(T val) { const int laneId = getLaneId(); // 2 val = shflSwap<T, Comparator>(val, 0x01, getBit(laneId, 1) ^ getBit(laneId, 0)); // 4 val = shflSwap<T, Comparator>(val, 0x02, getBit(laneId, 2) ^ getBit(laneId, 1)); val = shflSwap<T, Comparator>(val, 0x01, getBit(laneId, 2) ^ getBit(laneId, 0)); // 8 val = shflSwap<T, Comparator>(val, 0x04, getBit(laneId, 3) ^ getBit(laneId, 2)); val = shflSwap<T, Comparator>(val, 0x02, getBit(laneId, 3) ^ getBit(laneId, 1)); val = shflSwap<T, Comparator>(val, 0x01, getBit(laneId, 3) ^ getBit(laneId, 0)); // 16 val = shflSwap<T, Comparator>(val, 0x08, getBit(laneId, 4) ^ getBit(laneId, 3)); val = shflSwap<T, Comparator>(val, 0x04, getBit(laneId, 4) ^ getBit(laneId, 2)); val = shflSwap<T, Comparator>(val, 0x02, getBit(laneId, 4) ^ getBit(laneId, 1)); val = shflSwap<T, Comparator>(val, 0x01, getBit(laneId, 4) ^ getBit(laneId, 0)); // 32 val = shflSwap<T, Comparator>(val, 0x10, getBit(laneId, 4)); val = shflSwap<T, Comparator>(val, 0x08, getBit(laneId, 3)); val = shflSwap<T, Comparator>(val, 0x04, getBit(laneId, 2)); val = shflSwap<T, Comparator>(val, 0x02, getBit(laneId, 1)); val = shflSwap<T, Comparator>(val, 0x01, getBit(laneId, 0)); return val; } /// Determine if two warp threads have the same value (a collision). template <typename T> __device__ __forceinline__ bool warpHasCollision(T val) { // -sort all values // -compare our lower neighbor's value against ourselves (excepting // the first lane) // -if any lane as a difference of 0, there is a duplicate // (excepting the first lane) val = warpBitonicSort<T, LessThan<T>>(val); const T lower = __shfl_up_sync(MASK, val, 1); // Shuffle for lane 0 will present its same value, so only // subsequent lanes will detect duplicates const bool dup = (lower == val) && (getLaneId() != 0); return (__any_sync(MASK, dup) != 0); } /// Determine if two warp threads have the same value (a collision), /// and returns a bitmask of the lanes that are known to collide with /// other lanes. Not all lanes that are mutually colliding return a /// bit; all lanes with a `1` bit are guaranteed to collide with a /// lane with a `0` bit, so the mask can be used to serialize /// execution for lanes that collide with others. /// (mask | (mask >> 1)) will yield all mutually colliding lanes. template <typename T> __device__ __forceinline__ unsigned int warpCollisionMask(T val) { // -sort all (lane, value) pairs on value // -compare our lower neighbor's value against ourselves (excepting // the first lane) // -if any lane as a difference of 0, there is a duplicate // (excepting the first lane) // -shuffle sort (originating lane, dup) pairs back to the original // lane and report Pair<T, int> pVal(val, getLaneId()); pVal = warpBitonicSort<Pair<T, int>, LessThan<Pair<T, int> > >(pVal); // If our neighbor is the same as us, we know our thread's value is // duplicated. 
All except for lane 0, since shfl will present its // own value (and if lane 0's value is duplicated, lane 1 will pick // that up) const unsigned long lower = __shfl_up_sync(MASK, pVal.k, 1); Pair<int, bool> dup(pVal.v, (lower == pVal.k) && (getLaneId() != 0)); // Sort back based on lane ID so each thread originally knows // whether or not it duplicated dup = warpBitonicSort<Pair<int, bool>, LessThan<Pair<int, bool> > >(dup); return __ballot_sync(MASK, dup.v); } __device__ int hasDuplicate[32]; __global__ void checkDuplicates(int num, const int* v) { hasDuplicate[threadIdx.x] = (int) warpHasCollision(v[threadIdx.x]); } __device__ unsigned int duplicateMask; __global__ void checkDuplicateMask(int num, const int* v) { unsigned int mask = warpCollisionMask(v[threadIdx.x]); if (threadIdx.x == 0) { duplicateMask = mask; } } vector<int> checkDuplicates(const vector<int>& v) { int* devSet = NULL; hipMalloc(&devSet, v.size() * sizeof(int)); hipMemcpy(devSet, v.data(), v.size() * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( checkDuplicates), dim3(1), dim3(32), 0, 0, v.size(), devSet); vector<int> hasDuplicates(32, false); hipMemcpyFromSymbol(hasDuplicates.data(), hasDuplicate, sizeof(int) * 32, 0, hipMemcpyDeviceToHost); hipFree(devSet); return hasDuplicates; } unsigned int checkDuplicateMask(const vector<int>& v) { int* devSet = NULL; hipMalloc(&devSet, v.size() * sizeof(int)); hipMemcpy(devSet, v.data(), v.size() * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( checkDuplicateMask), dim3(1), dim3(32), 0, 0, v.size(), devSet); unsigned int mask = 0; hipDeviceSynchronize(); hipMemcpyFromSymbol(&mask, duplicateMask, sizeof(unsigned int), 0, hipMemcpyDeviceToHost); hipFree(devSet); return mask; } void test_collision(const int ND) { for (int numDups = 0; numDups < ND; ++numDups) { vector<int> v; for (int i = 0; i < ND - numDups; ++i) { int r = 0; while (true) { r = rand(); bool found = false; for (unsigned int j = 0; j < v.size(); ++j) { if (v[j] == r) { found = true; break; } } if (!found) { break; } } v.push_back(r); } for (int i = 0; i < numDups; ++i) { v.push_back(v[0]); } assert(ND == v.size()); auto dupCheck = checkDuplicates(v); for (auto dup : dupCheck) { assert((numDups > 0) == dup); } } } void test_collisionMask(const int ND) { for (int numDups = 0; numDups < ND; ++numDups) { vector<int> v; for (int i = 0; i < ND - numDups; ++i) { int r = 0; while (true) { r = rand(); bool found = false; for (unsigned int j = 0; j < v.size(); ++j) { if (v[j] == r) { found = true; break; } } if (!found) { break; } } v.push_back(r); } for (int i = 0; i < numDups; ++i) { v.push_back(v[0]); } assert (ND == v.size()); auto mask = checkDuplicateMask(v); auto expected = numDups > 0 ? 
0xffffffffU << (ND - numDups) : 0; if (expected != mask) { printf("Error: numDups=%d expected=%x mask=%x\n", numDups, expected, mask); break; } } } int main(int argc, char* argv[]) { if (argc != 2) { printf("Usage: %s <repeat>\n", argv[0]); return 1; } srand(123); const int num_dup = 32; const int repeat = atoi(argv[1]); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) test_collision(num_dup); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of the function test_collision: %f (us)\n", time * 1e-3f / repeat); start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) test_collisionMask(num_dup); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of the function test_collisionMask: %f (us)\n", time * 1e-3f / repeat); return 0; }
96886500dff9177513fc7f0bccff908f96769aeb.cu
// // Copyright 2004-present Facebook. All Rights Reserved. // #include <stdlib.h> #include <assert.h> #include <iterator> #include <vector> #include <chrono> #include <cuda.h> using namespace std; #define WARP_SIZE 32 #define MASK 0xffffffff /// A simple pair type for CUDA device usage template <typename K, typename V> struct Pair { __host__ __device__ __forceinline__ Pair() {} __host__ __device__ __forceinline__ Pair(K key, V value) : k(key), v(value) {} __host__ __device__ __forceinline__ bool operator==(const Pair<K, V>& rhs) const { return (k == rhs.k) && (v == rhs.v); } __host__ __device__ __forceinline__ bool operator!=(const Pair<K, V>& rhs) const { return !operator==(rhs); } __host__ __device__ __forceinline__ bool operator<(const Pair<K, V>& rhs) const { return (k < rhs.k) || ((k == rhs.k) && (v < rhs.v)); } __host__ __device__ __forceinline__ bool operator>(const Pair<K, V>& rhs) const { return (k > rhs.k) || ((k == rhs.k) && (v > rhs.v)); } K k; V v; }; /** Extract a single bit at `pos` from `val` */ __device__ __forceinline__ int getBit(int val, int pos) { return (val >> pos) & 0x1; } /** Return the current thread's lane in the warp */ __device__ __forceinline__ int getLaneId() { return threadIdx.x % WARP_SIZE; } template <typename T> struct GreaterThan { static __device__ __forceinline__ bool compare(const T lhs, const T rhs) { return (lhs > rhs); } }; template <typename T> struct LessThan { static __device__ __forceinline__ bool compare(const T lhs, const T rhs) { return (lhs < rhs); } }; template <typename T> __device__ __forceinline__ T shfl_xor(const T val, int laneMask, int width = WARP_SIZE) { return __shfl_xor_sync(MASK, val, laneMask, width); } template <typename K, typename V> __device__ __forceinline__ Pair<K, V> shfl_xor(const Pair<K, V>& p, int laneMask, int width = WARP_SIZE) { return Pair<K, V>(__shfl_xor_sync(MASK, p.k, laneMask, width), __shfl_xor_sync(MASK, p.v, laneMask, width)); } template <typename T, typename Comparator> __device__ __forceinline__ T shflSwap(const T x, int mask, int dir) { T y = shfl_xor(x, mask); return Comparator::compare(x, y) == dir ? y : x; } /// Defines a bitonic sort network to exchange 'V' according to /// `SWAP()`'s compare and exchange mechanism across the warp, ordered /// according to the comparator `comp`. 
In other words, if `comp` is /// `GreaterThan<T>`, then lane 0 will contain the highest `val` /// presented across the warp /// /// See also /// http://on-demand.gputechconf.com/gtc/2013/presentations/S3174-Kepler-Shuffle-Tips-Tricks.pdf template <typename T, typename Comparator> __device__ T warpBitonicSort(T val) { const int laneId = getLaneId(); // 2 val = shflSwap<T, Comparator>(val, 0x01, getBit(laneId, 1) ^ getBit(laneId, 0)); // 4 val = shflSwap<T, Comparator>(val, 0x02, getBit(laneId, 2) ^ getBit(laneId, 1)); val = shflSwap<T, Comparator>(val, 0x01, getBit(laneId, 2) ^ getBit(laneId, 0)); // 8 val = shflSwap<T, Comparator>(val, 0x04, getBit(laneId, 3) ^ getBit(laneId, 2)); val = shflSwap<T, Comparator>(val, 0x02, getBit(laneId, 3) ^ getBit(laneId, 1)); val = shflSwap<T, Comparator>(val, 0x01, getBit(laneId, 3) ^ getBit(laneId, 0)); // 16 val = shflSwap<T, Comparator>(val, 0x08, getBit(laneId, 4) ^ getBit(laneId, 3)); val = shflSwap<T, Comparator>(val, 0x04, getBit(laneId, 4) ^ getBit(laneId, 2)); val = shflSwap<T, Comparator>(val, 0x02, getBit(laneId, 4) ^ getBit(laneId, 1)); val = shflSwap<T, Comparator>(val, 0x01, getBit(laneId, 4) ^ getBit(laneId, 0)); // 32 val = shflSwap<T, Comparator>(val, 0x10, getBit(laneId, 4)); val = shflSwap<T, Comparator>(val, 0x08, getBit(laneId, 3)); val = shflSwap<T, Comparator>(val, 0x04, getBit(laneId, 2)); val = shflSwap<T, Comparator>(val, 0x02, getBit(laneId, 1)); val = shflSwap<T, Comparator>(val, 0x01, getBit(laneId, 0)); return val; } /// Determine if two warp threads have the same value (a collision). template <typename T> __device__ __forceinline__ bool warpHasCollision(T val) { // -sort all values // -compare our lower neighbor's value against ourselves (excepting // the first lane) // -if any lane as a difference of 0, there is a duplicate // (excepting the first lane) val = warpBitonicSort<T, LessThan<T>>(val); const T lower = __shfl_up_sync(MASK, val, 1); // Shuffle for lane 0 will present its same value, so only // subsequent lanes will detect duplicates const bool dup = (lower == val) && (getLaneId() != 0); return (__any_sync(MASK, dup) != 0); } /// Determine if two warp threads have the same value (a collision), /// and returns a bitmask of the lanes that are known to collide with /// other lanes. Not all lanes that are mutually colliding return a /// bit; all lanes with a `1` bit are guaranteed to collide with a /// lane with a `0` bit, so the mask can be used to serialize /// execution for lanes that collide with others. /// (mask | (mask >> 1)) will yield all mutually colliding lanes. template <typename T> __device__ __forceinline__ unsigned int warpCollisionMask(T val) { // -sort all (lane, value) pairs on value // -compare our lower neighbor's value against ourselves (excepting // the first lane) // -if any lane as a difference of 0, there is a duplicate // (excepting the first lane) // -shuffle sort (originating lane, dup) pairs back to the original // lane and report Pair<T, int> pVal(val, getLaneId()); pVal = warpBitonicSort<Pair<T, int>, LessThan<Pair<T, int> > >(pVal); // If our neighbor is the same as us, we know our thread's value is // duplicated. 
All except for lane 0, since shfl will present its // own value (and if lane 0's value is duplicated, lane 1 will pick // that up) const unsigned long lower = __shfl_up_sync(MASK, pVal.k, 1); Pair<int, bool> dup(pVal.v, (lower == pVal.k) && (getLaneId() != 0)); // Sort back based on lane ID so each thread originally knows // whether or not it duplicated dup = warpBitonicSort<Pair<int, bool>, LessThan<Pair<int, bool> > >(dup); return __ballot_sync(MASK, dup.v); } __device__ int hasDuplicate[32]; __global__ void checkDuplicates(int num, const int* v) { hasDuplicate[threadIdx.x] = (int) warpHasCollision(v[threadIdx.x]); } __device__ unsigned int duplicateMask; __global__ void checkDuplicateMask(int num, const int* v) { unsigned int mask = warpCollisionMask(v[threadIdx.x]); if (threadIdx.x == 0) { duplicateMask = mask; } } vector<int> checkDuplicates(const vector<int>& v) { int* devSet = NULL; cudaMalloc(&devSet, v.size() * sizeof(int)); cudaMemcpy(devSet, v.data(), v.size() * sizeof(int), cudaMemcpyHostToDevice); checkDuplicates<<<1, 32>>>(v.size(), devSet); vector<int> hasDuplicates(32, false); cudaMemcpyFromSymbol(hasDuplicates.data(), hasDuplicate, sizeof(int) * 32, 0, cudaMemcpyDeviceToHost); cudaFree(devSet); return hasDuplicates; } unsigned int checkDuplicateMask(const vector<int>& v) { int* devSet = NULL; cudaMalloc(&devSet, v.size() * sizeof(int)); cudaMemcpy(devSet, v.data(), v.size() * sizeof(int), cudaMemcpyHostToDevice); checkDuplicateMask<<<1, 32>>>(v.size(), devSet); unsigned int mask = 0; cudaDeviceSynchronize(); cudaMemcpyFromSymbol(&mask, duplicateMask, sizeof(unsigned int), 0, cudaMemcpyDeviceToHost); cudaFree(devSet); return mask; } void test_collision(const int ND) { for (int numDups = 0; numDups < ND; ++numDups) { vector<int> v; for (int i = 0; i < ND - numDups; ++i) { int r = 0; while (true) { r = rand(); bool found = false; for (unsigned int j = 0; j < v.size(); ++j) { if (v[j] == r) { found = true; break; } } if (!found) { break; } } v.push_back(r); } for (int i = 0; i < numDups; ++i) { v.push_back(v[0]); } assert(ND == v.size()); auto dupCheck = checkDuplicates(v); for (auto dup : dupCheck) { assert((numDups > 0) == dup); } } } void test_collisionMask(const int ND) { for (int numDups = 0; numDups < ND; ++numDups) { vector<int> v; for (int i = 0; i < ND - numDups; ++i) { int r = 0; while (true) { r = rand(); bool found = false; for (unsigned int j = 0; j < v.size(); ++j) { if (v[j] == r) { found = true; break; } } if (!found) { break; } } v.push_back(r); } for (int i = 0; i < numDups; ++i) { v.push_back(v[0]); } assert (ND == v.size()); auto mask = checkDuplicateMask(v); auto expected = numDups > 0 ? 
0xffffffffU << (ND - numDups) : 0; if (expected != mask) { printf("Error: numDups=%d expected=%x mask=%x\n", numDups, expected, mask); break; } } } int main(int argc, char* argv[]) { if (argc != 2) { printf("Usage: %s <repeat>\n", argv[0]); return 1; } srand(123); const int num_dup = 32; const int repeat = atoi(argv[1]); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) test_collision(num_dup); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of the function test_collision: %f (us)\n", time * 1e-3f / repeat); start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) test_collisionMask(num_dup); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of the function test_collisionMask: %f (us)\n", time * 1e-3f / repeat); return 0; }
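// Hypothetical usage sketch (not part of the original file): one way the mask returned by
// warpCollisionMask() can serialize lanes that hold the same key. Lanes with a 0 bit hold
// mutually distinct keys and update in parallel; lanes with a 1 bit are replayed one at a
// time. The kernel name and the counts buffer are assumptions; launch as
// warpHistogram<<<1, 32>>>(d_keys, d_counts) so exactly one full warp participates.
__global__ void warpHistogram(const int* keys, int* counts) {
  const int key = keys[threadIdx.x];
  const unsigned int mask = warpCollisionMask(key);
  const bool colliding = (mask >> getLaneId()) & 0x1;
  if (!colliding) {
    counts[key] += 1;               // these lanes hold mutually distinct keys
  }
  __syncwarp(MASK);
  for (int lane = 0; lane < WARP_SIZE; ++lane) {
    if (colliding && getLaneId() == lane) {
      counts[key] += 1;             // colliding lanes update serially, one per iteration
    }
    __syncwarp(MASK);
  }
}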
886418af3586e154b2f11d192dc08c6fa7a74d35.hip
// !!! This is a file automatically generated by hipify!!!
/** \file utils.cu
 * \brief Definitions of some macros and data structures useful to several device functions.
 */

#ifndef FLAGUTILS_CU
#define FLAGUTILS_CU 1

#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

#include "common.h"
#include "utils.h"

/** @cond NONDOC */ // not documented by doxygen
extern "C" void checkNullAllocation (void *ptr, const char *str);
extern "C" int **reallocazionePtrInt(ulong newsize, int ** old);
extern "C" void exitWithError(const char *mess, const char *string);
/** @endcond */

uint uplog2_int (uint val) {
  uint res = 0;
  if (val <= 0) return(0);
  while (val != 0) {
    val >>= 1;
    res++;
  }
  return(res);
}

void checkCUDAError(const char *msg) {
  hipError_t err = hipGetLastError();
  if( hipSuccess != err) {
    fprintf(stderr, "ERRORE CUDA: >%s<: >%s<. Eseguo:EXIT\n", msg, hipGetErrorString(err) );
    exit(-1);
  }
}

void checkDevice(int *deviceCount, config *conf) {
  hipError_t error_id = hipGetDeviceCount(deviceCount);
  if (error_id != hipSuccess) {
    exitWithError("hipGetDeviceCount returned: >%s<\n", hipGetErrorString(error_id));
  }
  if ((*deviceCount) > (conf->deviceid)) {
    // the chosen device
    // fprintf(stderr,"\nOOOOOOOOOOOOO deviceCount:%d deviceid:%d)\n\n", (*deviceCount), conf->deviceid);
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties(&deviceProp, conf->deviceid);
    conf->deviceProp_totalGlobalMem = deviceProp.totalGlobalMem;
    conf->maxThreadsPerBlock = deviceProp.maxThreadsPerBlock;
    conf->sharedMemPerBlock = deviceProp.sharedMemPerBlock;
    conf->warpSize = deviceProp.warpSize;
    conf->capabilityMajor = deviceProp.major;
    conf->capabilityMinor = deviceProp.minor;
    conf->clockRate = deviceProp.clockRate;
    conf->ECCEnabled = deviceProp.ECCEnabled;
    //conf->multiProcessorCount = deviceProp.multiProcessorCount;
    memcpy(conf->devicename, deviceProp.name, 256*sizeof(char));
  } else {
    exitWithError("checkDevice returned: >%s<\n", "Selected device not available");
  }
}

#endif
886418af3586e154b2f11d192dc08c6fa7a74d35.cu
/** \file utils.cu
 * \brief Definitions of some macros and data structures useful to several device functions.
 */

#ifndef FLAGUTILS_CU
#define FLAGUTILS_CU 1

#include <cuda.h>
#include <cuda_runtime.h>

#include "common.h"
#include "utils.h"

/** @cond NONDOC */ // not documented by doxygen
extern "C" void checkNullAllocation (void *ptr, const char *str);
extern "C" int **reallocazionePtrInt(ulong newsize, int ** old);
extern "C" void exitWithError(const char *mess, const char *string);
/** @endcond */

uint uplog2_int (uint val) {
  uint res = 0;
  if (val <= 0) return(0);
  while (val != 0) {
    val >>= 1;
    res++;
  }
  return(res);
}

void checkCUDAError(const char *msg) {
  cudaError_t err = cudaGetLastError();
  if( cudaSuccess != err) {
    fprintf(stderr, "ERRORE CUDA: >%s<: >%s<. Eseguo:EXIT\n", msg, cudaGetErrorString(err) );
    exit(-1);
  }
}

void checkDevice(int *deviceCount, config *conf) {
  cudaError_t error_id = cudaGetDeviceCount(deviceCount);
  if (error_id != cudaSuccess) {
    exitWithError("cudaGetDeviceCount returned: >%s<\n", cudaGetErrorString(error_id));
  }
  if ((*deviceCount) > (conf->deviceid)) {
    // the chosen device
    // fprintf(stderr,"\nOOOOOOOOOOOOO deviceCount:%d deviceid:%d)\n\n", (*deviceCount), conf->deviceid);
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, conf->deviceid);
    conf->deviceProp_totalGlobalMem = deviceProp.totalGlobalMem;
    conf->maxThreadsPerBlock = deviceProp.maxThreadsPerBlock;
    conf->sharedMemPerBlock = deviceProp.sharedMemPerBlock;
    conf->warpSize = deviceProp.warpSize;
    conf->capabilityMajor = deviceProp.major;
    conf->capabilityMinor = deviceProp.minor;
    conf->clockRate = deviceProp.clockRate;
    conf->ECCEnabled = deviceProp.ECCEnabled;
    //conf->multiProcessorCount = deviceProp.multiProcessorCount;
    memcpy(conf->devicename, deviceProp.name, 256*sizeof(char));
  } else {
    exitWithError("checkDevice returned: >%s<\n", "Selected device not available");
  }
}

#endif
94d5a3146b50dec180a3b176761756f0651a6ef2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "GpGpu/GpGpu_ParamCorrelation.cuh" texture< pixel, hipTextureType2D > TexS_MaskTerD; __global__ void dilateKernel(pixel* dataOut, int r, uint2 dim, uint2 dimH) { __shared__ pixel out[ BLOCKDIM ][ BLOCKDIM ]; out[threadIdx.y][threadIdx.x] = 0; __syncthreads(); const int2 ptH = make_int2( blockIdx.x * (blockDim.x - 2*r) + threadIdx.x, blockIdx.y * (blockDim.y - 2*r) + threadIdx.y); const int2 pt = ptH - r ; if (threadIdx.x >= r && threadIdx.y >= r && threadIdx.x < blockDim.x - r && threadIdx.y < blockDim.y - r) { const pixel p = tex2D(TexS_MaskTerD, pt.x, pt.y); if (p == 1 && !(oI(pt,0) || oSE(pt,dim))) { #pragma unroll for (int i = threadIdx.x - r; i < threadIdx.x ;i++) out[threadIdx.y][i] = 1; #pragma unroll for (int i = threadIdx.x; i < threadIdx.x + r + 1;i++) out[threadIdx.y][i] = 1; } } __syncthreads(); if (out[threadIdx.y][threadIdx.x] == 1 ) { #pragma unroll for (int j = threadIdx.y - r; j < threadIdx.y + r +1 ;j++) out[j][threadIdx.x] = 1; } __syncthreads(); if (out[threadIdx.y][threadIdx.x] == 1 ) dataOut[to1D(ptH,dimH)] = 1; } extern "C" void dilateKernel(pixel* HostDataOut, short r, uint2 dim) { dim3 threads( BLOCKDIM, BLOCKDIM, 1); uint2 thd2D = make_uint2(threads); uint2 actiThsCo = thd2D - 2 * r; uint2 block2D = iDivUp(dim,actiThsCo); dim3 blocks(block2D.x , block2D.y,1); CuDeviceData2D<pixel> deviceDataOut; uint2 dimDM = dim + 2*r; deviceDataOut.Realloc(dimDM); deviceDataOut.Memset(0); hipLaunchKernelGGL(( dilateKernel), dim3(blocks),dim3(threads), 0, 0, deviceDataOut.pData(),r,dim,dimDM); getLastCudaError("DilateX kernel failed"); deviceDataOut.DecoratorDeviceData::CopyDevicetoHost(HostDataOut); deviceDataOut.Dealloc(); }
94d5a3146b50dec180a3b176761756f0651a6ef2.cu
#include "GpGpu/GpGpu_ParamCorrelation.cuh" texture< pixel, cudaTextureType2D > TexS_MaskTerD; __global__ void dilateKernel(pixel* dataOut, int r, uint2 dim, uint2 dimH) { __shared__ pixel out[ BLOCKDIM ][ BLOCKDIM ]; out[threadIdx.y][threadIdx.x] = 0; __syncthreads(); const int2 ptH = make_int2( blockIdx.x * (blockDim.x - 2*r) + threadIdx.x, blockIdx.y * (blockDim.y - 2*r) + threadIdx.y); const int2 pt = ptH - r ; if (threadIdx.x >= r && threadIdx.y >= r && threadIdx.x < blockDim.x - r && threadIdx.y < blockDim.y - r) { const pixel p = tex2D(TexS_MaskTerD, pt.x, pt.y); if (p == 1 && !(oI(pt,0) || oSE(pt,dim))) { #pragma unroll for (int i = threadIdx.x - r; i < threadIdx.x ;i++) out[threadIdx.y][i] = 1; #pragma unroll for (int i = threadIdx.x; i < threadIdx.x + r + 1;i++) out[threadIdx.y][i] = 1; } } __syncthreads(); if (out[threadIdx.y][threadIdx.x] == 1 ) { #pragma unroll for (int j = threadIdx.y - r; j < threadIdx.y + r +1 ;j++) out[j][threadIdx.x] = 1; } __syncthreads(); if (out[threadIdx.y][threadIdx.x] == 1 ) dataOut[to1D(ptH,dimH)] = 1; } extern "C" void dilateKernel(pixel* HostDataOut, short r, uint2 dim) { dim3 threads( BLOCKDIM, BLOCKDIM, 1); uint2 thd2D = make_uint2(threads); uint2 actiThsCo = thd2D - 2 * r; uint2 block2D = iDivUp(dim,actiThsCo); dim3 blocks(block2D.x , block2D.y,1); CuDeviceData2D<pixel> deviceDataOut; uint2 dimDM = dim + 2*r; deviceDataOut.Realloc(dimDM); deviceDataOut.Memset(0); dilateKernel<<<blocks,threads>>>(deviceDataOut.pData(),r,dim,dimDM); getLastCudaError("DilateX kernel failed"); deviceDataOut.DecoratorDeviceData::CopyDevicetoHost(HostDataOut); deviceDataOut.Dealloc(); }
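// Worked example (my addition) of the halo arithmetic used by the launcher above,
// assuming BLOCKDIM = 32 in the headers and a dilation radius r = 2:
//   active threads per block  actiThsCo = 32 - 2*r = 28 in each direction
//   grid for dim = (1000, 1000): iDivUp(1000, 28) = 36 blocks per axis
//   dilated output buffer      dimDM = dim + 2*r = (1004, 1004)
// Each block writes only its inner (BLOCKDIM - 2r)^2 window, so neighbouring tiles
// overlap by r pixels on every side and the shared-memory tile covers the full halo.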
27db01ebaf2a8e0d9e4bcce14966912970d64878.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <iostream>
#include <cmath>
#include <cstdio>
#include <algorithm>
#include "polyiou.hpp"
using namespace std;

#define CUDA_CHECK(condition) \
  /* Code block avoids redefinition of hipError_t error */ \
  do { \
    hipError_t error = condition; \
    if (error != hipSuccess) { \
      std::cout << hipGetErrorString(error) << std::endl; \
    } \
  } while (0)

#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;

#define maxn 51
const double eps = 1E-8;

__device__ inline int sig(float d) {
  return(d > eps) - (d < -eps);
}

// Point is replaced by CUDA's float2 type
// struct Point{
//     double x,y; Point(){}
//     Point(double x,double y):x(x),y(y){}
//     bool operator==(const Point&p)const{
//         return sig(x-p.x)==0&&sig(y-p.y)==0;
//     }
// };

__device__ inline int point_eq(const float2 a, const float2 b) {
  return (sig(a.x - b.x) == 0) && (sig(a.y - b.y) == 0);
}

__device__ inline void point_swap(float2 *a, float2 *b) {
  float2 temp = *a;
  *a = *b;
  *b = temp;
}

// void point_reverse(Point* first, Point* last)
// {
//   while ((first!=last)&&(first!=--last)) {
//     point_swap (first,last);
//     ++first;
//   }
// }

__device__ inline void point_reverse(float2 *first, float2* last)
{
  while ((first != last) && (first != --last)) {
    point_swap(first, last);
    ++first;
  }
}

__device__ inline float cross(float2 o, float2 a, float2 b) {
  return(a.x - o.x)*(b.y - o.y) - (b.x - o.x)*(a.y - o.y);
}

__device__ inline float area(float2* ps, int n) {
  ps[n] = ps[0];
  float res = 0;
  for (int i = 0; i < n; i++) {
    res += ps[i].x*ps[i + 1].y - ps[i].y*ps[i + 1].x;
  }
  return res / 2.0;
}

__device__ inline int lineCross(float2 a, float2 b, float2 c, float2 d, float2&p) {
  float s1, s2;
  s1 = cross(a, b, c);
  s2 = cross(a, b, d);
  if (sig(s1) == 0 && sig(s2) == 0) return 2;
  if (sig(s2 - s1) == 0) return 0;
  p.x = (c.x*s2 - d.x*s1) / (s2 - s1);
  p.y = (c.y*s2 - d.y*s1) / (s2 - s1);
  return 1;
}

// Polygon clipping:
// clip polygon p with the directed line ab; the part on the left of vector (a,b) is kept,
// and the result is stored in place. If the result degenerates to a single point it is
// still returned, with n == 1.
__device__ inline void polygon_cut(float2*p, int&n, float2 a, float2 b, float2* pp) {
  // TODO: The static variable may be the reason, why single thread is ok,
  // multiple threads are not work
  // printf("polygon_cut, offset\n");
  // static float2 pp[maxn];
  int m = 0;
  p[n] = p[0];
  for (int i = 0; i < n; i++) {
    if (sig(cross(a, b, p[i])) > 0)
      pp[m++] = p[i];
    if (sig(cross(a, b, p[i])) != sig(cross(a, b, p[i + 1])))
      lineCross(a, b, p[i], p[i + 1], pp[m++]);
  }
  n = 0;
  for (int i = 0; i < m; i++)
    if (!i || !(point_eq(pp[i], pp[i - 1])))
      p[n++] = pp[i];
  // while(n>1&&p[n-1]==p[0])n--;
  while (n > 1 && point_eq(p[n - 1], p[0])) n--;
  // int x = blockIdx.x * blockDim.x + threadIdx.x;
  // // corresponding to k
  // int y = blockIdx.y * blockDim.y + threadIdx.y;
  // int offset = x * 1 + y;
  // printf("polygon_cut, offset\n");
}

//--------------- fancy separator line ---------------//
// Returns the signed intersection area of triangles oab and ocd, where o is the origin //
__device__ inline float intersectArea(float2 a, float2 b, float2 c, float2 d) {
  float2 o = make_float2(0, 0);
  int s1 = sig(cross(o, a, b));
  int s2 = sig(cross(o, c, d));
  if (s1 == 0 || s2 == 0) return 0.0; // degenerate, area is 0
  // if(s1==-1) swap(a,b);
  // if(s2==-1) swap(c,d);
  // printf("before swap\n");
  // printf("a.x %f, a.y %f\n", a.x, a.y);
  // printf("b.x %f, b.y %f\n", b.x, b.y);
  if (s1 == -1) point_swap(&a, &b);
  // printf("a.x %f, a.y %f\n", a.x, a.y);
  // printf("b.x %f, b.y %f\n", b.x, b.y);
  // printf("after swap\n");
  if (s2 == -1) point_swap(&c, &d);
  float2 p[10] = { o,a,b };
  int n = 3;
  // // manually implement polygon_cut(p, n, a, b)
  // float2 pp[maxn];
  // // polygon_cut(p, n, o, c)
  // int m=0;p[n]=p[0];
  // for(int i=0;i<n;i++){
  //     if(sig(cross(o,c,p[i]))>0) pp[m++]=p[i];
  //     if(sig(cross(o,c,p[i]))!=sig(cross(o,c,p[i+1])))
  //         lineCross(o,c,p[i],p[i+1],pp[m++]);
  // }
  // n=0;
  // for(int i=0;i<m;i++)
  //     if(!i||!(point_eq(pp[i], pp[i-1])))
  //         p[n++]=pp[i];
  // while(n>1&&point_eq(p[n-1], p[0]))n--;
  // // polygon_cut(p, n, c, d)
  // m=0;p[n]=p[0];
  // for(int i=0;i<n;i++){
  //     if(sig(cross(c,d,p[i]))>0) pp[m++]=p[i];
  //     if(sig(cross(c,d,p[i]))!=sig(cross(c,d,p[i+1])))
  //         lineCross(c,d,p[i],p[i+1],pp[m++]);
  // }
  // n=0;
  // for(int i=0;i<m;i++)
  //     if(!i||!(point_eq(pp[i], pp[i-1])))
  //         p[n++]=pp[i];
  // while(n>1&&point_eq(p[n-1], p[0]))n--;
  // // polygon_cut(p, n, d, o)
  // m=0;p[n]=p[0];
  // for(int i=0;i<n;i++){
  //     if(sig(cross(d,o,p[i]))>0) pp[m++]=p[i];
  //     if(sig(cross(d,o,p[i]))!=sig(cross(d,o,p[i+1])))
  //         lineCross(d,o,p[i],p[i+1],pp[m++]);
  // }
  // n=0;
  // for(int i=0;i<m;i++)
  //     if(!i||!(point_eq(pp[i], pp[i-1])))
  //         p[n++]=pp[i];
  // while(n>1&&point_eq(p[n-1], p[0]))n--;
  float2 pp[maxn];
  polygon_cut(p, n, o, c, pp);
  polygon_cut(p, n, c, d, pp);
  polygon_cut(p, n, d, o, pp);
  float res = fabs(area(p, n));
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  // corresponding to k
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int offset = x * 1 + y;
  // printf("intersectArea2, offset: %d, %f, %f, %f, %f, %f, %f, %f, %f, res: %f\n", offset, a.x, a.y, b.x, b.y, c.x, c.y, d.x, d.y, res);
  if (s1*s2 == -1) res = -res;
  return res;
}

// Compute the intersection area of two polygons
// TODO: here changed the input, this need to be debug
__device__ inline float intersectArea(float2*ps1, int n1, float2*ps2, int n2) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  // corresponding to k
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int offset = x * 1 + y;
  if (area(ps1, n1) < 0) point_reverse(ps1, ps1 + n1);
  if (area(ps2, n2) < 0) point_reverse(ps2, ps2 + n2);
  ps1[n1] = ps1[0];
  ps2[n2] = ps2[0];
  float res = 0;
  for (int i = 0; i < n1; i++) {
    for (int j = 0; j < n2; j++) {
      // printf("offset: %d, %f, %f, %f, %f, %f, %f, %f, %f addArea: %f \n",
      //        offset, ps1[i].x, ps1[i].y, ps1[i + 1].x, ps1[i + 1].y, ps2[j].x, ps2[j].y,
      //        ps2[j + 1].x, ps2[j + 1].y, intersectArea(ps1[i],ps1[i+1],ps2[j],ps2[j+1]));
      // float2 a = ps1[i];
      // float2 b = ps1[i+1];
      // float2 c = ps2[j];
      // float2 d = ps2[j+1];
      // res+=intersectArea2(a,b,c,d);
      res += intersectArea(ps1[i], ps1[i + 1], ps2[j], ps2[j + 1]);
    }
  }
  return res; // assume res is positive!
} __device__ inline void RotBox2Poly(float const * const dbox, float2 * ps) { float cs = cos(dbox[4]); float ss = sin(dbox[4]); float w = dbox[2]; float h = dbox[3]; float x_ctr = dbox[0]; float y_ctr = dbox[1]; ps[0].x = x_ctr + cs * (w / 2.0) - ss * (-h / 2.0); ps[1].x = x_ctr + cs * (w / 2.0) - ss * (h / 2.0); ps[2].x = x_ctr + cs * (-w / 2.0) - ss * (h / 2.0); ps[3].x = x_ctr + cs * (-w / 2.0) - ss * (-h / 2.0); ps[0].y = y_ctr + ss * (w / 2.0) + cs * (-h / 2.0); ps[1].y = y_ctr + ss * (w / 2.0) + cs * (h / 2.0); ps[2].y = y_ctr + ss * (-w / 2.0) + cs * (h / 2.0); ps[3].y = y_ctr + ss * (-w / 2.0) + cs * (-h / 2.0); } __device__ inline float devPolyIoU(float const * const dbbox1, float const * const dbbox2) { float2 ps1[maxn], ps2[maxn]; int n1 = 4; int n2 = 4; RotBox2Poly(dbbox1, ps1); RotBox2Poly(dbbox2, ps2); // printf("ps1: %f, %f, %f, %f, %f, %f, %f, %f\n", ps1[0].x, ps1[0].y, ps1[1].x, ps1[1].y, ps1[2].x, ps1[2].y, ps1[3].x, ps1[3].y); // printf("ps2: %f, %f, %f, %f, %f, %f, %f, %f\n", ps2[0].x, ps2[0].y, ps2[1].x, ps2[1].y, ps2[2].x, ps2[2].y, ps2[3].x, ps2[3].y); float inter_area = intersectArea(ps1, n1, ps2, n2); // printf("inter_area: %f \n", inter_area); float union_area = fabs(area(ps1, n1)) + fabs(area(ps2, n2)) - inter_area; // printf("before union_area\n"); // printf("union_area: %f \n", union_area); float iou = 0.0f; if (sig(union_area) > 0) iou = inter_area / union_area; // printf("iou: %f \n", iou); return iou; } __global__ void overlaps_kernel(const int N, const int K, const float* dev_boxes, const float * dev_query_boxes, float* dev_overlaps) { // const int col_start = blockIdx.y; // const int row_start = blockIdx.x; // corresponding to n int x = blockIdx.x * blockDim.x + threadIdx.x; // corresponding to k int y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < N) && (y < K)) { int offset = x * K + y; //printf // printf("offset: %d dbbox: %f %f %f %f %f\n", offset, (dev_boxes + x*5)[0], // (dev_boxes + x*5)[1], (dev_boxes + x*5)[2], (dev_boxes + x*5)[3], // (dev_boxes + x*5)[4] ); // printf("offset: %d dbbox: %f %f %f %f %f\n", offset, (dev_query_boxes + y*5)[0], // (dev_query_boxes + y*5)[1], (dev_query_boxes + y*5)[2], (dev_query_boxes + y*5)[3], // (dev_query_boxes + y*5)[4] ); dev_overlaps[offset] = devPolyIoU(dev_boxes + x * 5, dev_query_boxes + y * 5); } } void _set_device(int device_id) { int current_device; CUDA_CHECK(hipGetDevice(&current_device)); if (current_device == device_id) { return; } // The call to hipSetDevice must come before any calls to Get, which // may perform initialization using the GPU. 
CUDA_CHECK(hipSetDevice(device_id));
}

void _overlaps(float* overlaps, const float* boxes, const float* query_boxes, int n, int k, int device_id) {
  _set_device(device_id);
  float* overlaps_dev = NULL;
  float* boxes_dev = NULL;
  float* query_boxes_dev = NULL;
  CUDA_CHECK(hipMalloc(&boxes_dev, n * 5 * sizeof(float)));
  CUDA_CHECK(hipMemcpy(boxes_dev, boxes, n * 5 * sizeof(float), hipMemcpyHostToDevice));
  CUDA_CHECK(hipMalloc(&query_boxes_dev, k * 5 * sizeof(float)));
  CUDA_CHECK(hipMemcpy(query_boxes_dev, query_boxes, k * 5 * sizeof(float), hipMemcpyHostToDevice));
  CUDA_CHECK(hipMalloc(&overlaps_dev, n * k * sizeof(float)));
  dim3 blocks(DIVUP(n, 32), DIVUP(k, 32));
  dim3 threads(32, 32); // 1024 threads per block
  hipLaunchKernelGGL(( overlaps_kernel), dim3(blocks), dim3(threads), 0, 0, n, k, boxes_dev, query_boxes_dev, overlaps_dev);
  CUDA_CHECK(hipMemcpy(overlaps, overlaps_dev, n * k * sizeof(float), hipMemcpyDeviceToHost));
  CUDA_CHECK(hipFree(overlaps_dev));
  CUDA_CHECK(hipFree(boxes_dev));
  CUDA_CHECK(hipFree(query_boxes_dev));
}
27db01ebaf2a8e0d9e4bcce14966912970d64878.cu
#include <vector>
#include <iostream>
#include <cmath>
#include <cstdio>
#include <algorithm>
#include "polyiou.hpp"
using namespace std;

#define CUDA_CHECK(condition) \
  /* Code block avoids redefinition of cudaError_t error */ \
  do { \
    cudaError_t error = condition; \
    if (error != cudaSuccess) { \
      std::cout << cudaGetErrorString(error) << std::endl; \
    } \
  } while (0)

#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;

#define maxn 51
const double eps = 1E-8;

__device__ inline int sig(float d) {
  return(d > eps) - (d < -eps);
}

// Point is replaced by CUDA's float2 type
// struct Point{
//     double x,y; Point(){}
//     Point(double x,double y):x(x),y(y){}
//     bool operator==(const Point&p)const{
//         return sig(x-p.x)==0&&sig(y-p.y)==0;
//     }
// };

__device__ inline int point_eq(const float2 a, const float2 b) {
  return (sig(a.x - b.x) == 0) && (sig(a.y - b.y) == 0);
}

__device__ inline void point_swap(float2 *a, float2 *b) {
  float2 temp = *a;
  *a = *b;
  *b = temp;
}

// void point_reverse(Point* first, Point* last)
// {
//   while ((first!=last)&&(first!=--last)) {
//     point_swap (first,last);
//     ++first;
//   }
// }

__device__ inline void point_reverse(float2 *first, float2* last)
{
  while ((first != last) && (first != --last)) {
    point_swap(first, last);
    ++first;
  }
}

__device__ inline float cross(float2 o, float2 a, float2 b) {
  return(a.x - o.x)*(b.y - o.y) - (b.x - o.x)*(a.y - o.y);
}

__device__ inline float area(float2* ps, int n) {
  ps[n] = ps[0];
  float res = 0;
  for (int i = 0; i < n; i++) {
    res += ps[i].x*ps[i + 1].y - ps[i].y*ps[i + 1].x;
  }
  return res / 2.0;
}

__device__ inline int lineCross(float2 a, float2 b, float2 c, float2 d, float2&p) {
  float s1, s2;
  s1 = cross(a, b, c);
  s2 = cross(a, b, d);
  if (sig(s1) == 0 && sig(s2) == 0) return 2;
  if (sig(s2 - s1) == 0) return 0;
  p.x = (c.x*s2 - d.x*s1) / (s2 - s1);
  p.y = (c.y*s2 - d.y*s1) / (s2 - s1);
  return 1;
}

// Polygon clipping:
// clip polygon p with the directed line ab; the part on the left of vector (a,b) is kept,
// and the result is stored in place. If the result degenerates to a single point it is
// still returned, with n == 1.
__device__ inline void polygon_cut(float2*p, int&n, float2 a, float2 b, float2* pp) {
  // TODO: The static variable may be the reason, why single thread is ok,
  // multiple threads are not work
  // printf("polygon_cut, offset\n");
  // static float2 pp[maxn];
  int m = 0;
  p[n] = p[0];
  for (int i = 0; i < n; i++) {
    if (sig(cross(a, b, p[i])) > 0)
      pp[m++] = p[i];
    if (sig(cross(a, b, p[i])) != sig(cross(a, b, p[i + 1])))
      lineCross(a, b, p[i], p[i + 1], pp[m++]);
  }
  n = 0;
  for (int i = 0; i < m; i++)
    if (!i || !(point_eq(pp[i], pp[i - 1])))
      p[n++] = pp[i];
  // while(n>1&&p[n-1]==p[0])n--;
  while (n > 1 && point_eq(p[n - 1], p[0])) n--;
  // int x = blockIdx.x * blockDim.x + threadIdx.x;
  // // corresponding to k
  // int y = blockIdx.y * blockDim.y + threadIdx.y;
  // int offset = x * 1 + y;
  // printf("polygon_cut, offset\n");
}

//--------------- fancy separator line ---------------//
// Returns the signed intersection area of triangles oab and ocd, where o is the origin //
__device__ inline float intersectArea(float2 a, float2 b, float2 c, float2 d) {
  float2 o = make_float2(0, 0);
  int s1 = sig(cross(o, a, b));
  int s2 = sig(cross(o, c, d));
  if (s1 == 0 || s2 == 0) return 0.0; // degenerate, area is 0
  // if(s1==-1) swap(a,b);
  // if(s2==-1) swap(c,d);
  // printf("before swap\n");
  // printf("a.x %f, a.y %f\n", a.x, a.y);
  // printf("b.x %f, b.y %f\n", b.x, b.y);
  if (s1 == -1) point_swap(&a, &b);
  // printf("a.x %f, a.y %f\n", a.x, a.y);
  // printf("b.x %f, b.y %f\n", b.x, b.y);
  // printf("after swap\n");
  if (s2 == -1) point_swap(&c, &d);
  float2 p[10] = { o,a,b };
  int n = 3;
  // // manually implement polygon_cut(p, n, a, b)
  // float2 pp[maxn];
  // // polygon_cut(p, n, o, c)
  // int m=0;p[n]=p[0];
  // for(int i=0;i<n;i++){
  //     if(sig(cross(o,c,p[i]))>0) pp[m++]=p[i];
  //     if(sig(cross(o,c,p[i]))!=sig(cross(o,c,p[i+1])))
  //         lineCross(o,c,p[i],p[i+1],pp[m++]);
  // }
  // n=0;
  // for(int i=0;i<m;i++)
  //     if(!i||!(point_eq(pp[i], pp[i-1])))
  //         p[n++]=pp[i];
  // while(n>1&&point_eq(p[n-1], p[0]))n--;
  // // polygon_cut(p, n, c, d)
  // m=0;p[n]=p[0];
  // for(int i=0;i<n;i++){
  //     if(sig(cross(c,d,p[i]))>0) pp[m++]=p[i];
  //     if(sig(cross(c,d,p[i]))!=sig(cross(c,d,p[i+1])))
  //         lineCross(c,d,p[i],p[i+1],pp[m++]);
  // }
  // n=0;
  // for(int i=0;i<m;i++)
  //     if(!i||!(point_eq(pp[i], pp[i-1])))
  //         p[n++]=pp[i];
  // while(n>1&&point_eq(p[n-1], p[0]))n--;
  // // polygon_cut(p, n, d, o)
  // m=0;p[n]=p[0];
  // for(int i=0;i<n;i++){
  //     if(sig(cross(d,o,p[i]))>0) pp[m++]=p[i];
  //     if(sig(cross(d,o,p[i]))!=sig(cross(d,o,p[i+1])))
  //         lineCross(d,o,p[i],p[i+1],pp[m++]);
  // }
  // n=0;
  // for(int i=0;i<m;i++)
  //     if(!i||!(point_eq(pp[i], pp[i-1])))
  //         p[n++]=pp[i];
  // while(n>1&&point_eq(p[n-1], p[0]))n--;
  float2 pp[maxn];
  polygon_cut(p, n, o, c, pp);
  polygon_cut(p, n, c, d, pp);
  polygon_cut(p, n, d, o, pp);
  float res = fabs(area(p, n));
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  // corresponding to k
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int offset = x * 1 + y;
  // printf("intersectArea2, offset: %d, %f, %f, %f, %f, %f, %f, %f, %f, res: %f\n", offset, a.x, a.y, b.x, b.y, c.x, c.y, d.x, d.y, res);
  if (s1*s2 == -1) res = -res;
  return res;
}

// Compute the intersection area of two polygons
// TODO: here changed the input, this need to be debug
__device__ inline float intersectArea(float2*ps1, int n1, float2*ps2, int n2) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  // corresponding to k
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int offset = x * 1 + y;
  if (area(ps1, n1) < 0) point_reverse(ps1, ps1 + n1);
  if (area(ps2, n2) < 0) point_reverse(ps2, ps2 + n2);
  ps1[n1] = ps1[0];
  ps2[n2] = ps2[0];
  float res = 0;
  for (int i = 0; i < n1; i++) {
    for (int j = 0; j < n2; j++) {
      // printf("offset: %d, %f, %f, %f, %f, %f, %f, %f, %f addArea: %f \n",
      //        offset, ps1[i].x, ps1[i].y, ps1[i + 1].x, ps1[i + 1].y, ps2[j].x, ps2[j].y,
      //        ps2[j + 1].x, ps2[j + 1].y, intersectArea(ps1[i],ps1[i+1],ps2[j],ps2[j+1]));
      // float2 a = ps1[i];
      // float2 b = ps1[i+1];
      // float2 c = ps2[j];
      // float2 d = ps2[j+1];
      // res+=intersectArea2(a,b,c,d);
      res += intersectArea(ps1[i], ps1[i + 1], ps2[j], ps2[j + 1]);
    }
  }
  return res; // assume res is positive!
} __device__ inline void RotBox2Poly(float const * const dbox, float2 * ps) { float cs = cos(dbox[4]); float ss = sin(dbox[4]); float w = dbox[2]; float h = dbox[3]; float x_ctr = dbox[0]; float y_ctr = dbox[1]; ps[0].x = x_ctr + cs * (w / 2.0) - ss * (-h / 2.0); ps[1].x = x_ctr + cs * (w / 2.0) - ss * (h / 2.0); ps[2].x = x_ctr + cs * (-w / 2.0) - ss * (h / 2.0); ps[3].x = x_ctr + cs * (-w / 2.0) - ss * (-h / 2.0); ps[0].y = y_ctr + ss * (w / 2.0) + cs * (-h / 2.0); ps[1].y = y_ctr + ss * (w / 2.0) + cs * (h / 2.0); ps[2].y = y_ctr + ss * (-w / 2.0) + cs * (h / 2.0); ps[3].y = y_ctr + ss * (-w / 2.0) + cs * (-h / 2.0); } __device__ inline float devPolyIoU(float const * const dbbox1, float const * const dbbox2) { float2 ps1[maxn], ps2[maxn]; int n1 = 4; int n2 = 4; RotBox2Poly(dbbox1, ps1); RotBox2Poly(dbbox2, ps2); // printf("ps1: %f, %f, %f, %f, %f, %f, %f, %f\n", ps1[0].x, ps1[0].y, ps1[1].x, ps1[1].y, ps1[2].x, ps1[2].y, ps1[3].x, ps1[3].y); // printf("ps2: %f, %f, %f, %f, %f, %f, %f, %f\n", ps2[0].x, ps2[0].y, ps2[1].x, ps2[1].y, ps2[2].x, ps2[2].y, ps2[3].x, ps2[3].y); float inter_area = intersectArea(ps1, n1, ps2, n2); // printf("inter_area: %f \n", inter_area); float union_area = fabs(area(ps1, n1)) + fabs(area(ps2, n2)) - inter_area; // printf("before union_area\n"); // printf("union_area: %f \n", union_area); float iou = 0.0f; if (sig(union_area) > 0) iou = inter_area / union_area; // printf("iou: %f \n", iou); return iou; } __global__ void overlaps_kernel(const int N, const int K, const float* dev_boxes, const float * dev_query_boxes, float* dev_overlaps) { // const int col_start = blockIdx.y; // const int row_start = blockIdx.x; // corresponding to n int x = blockIdx.x * blockDim.x + threadIdx.x; // corresponding to k int y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < N) && (y < K)) { int offset = x * K + y; //printf // printf("offset: %d dbbox: %f %f %f %f %f\n", offset, (dev_boxes + x*5)[0], // (dev_boxes + x*5)[1], (dev_boxes + x*5)[2], (dev_boxes + x*5)[3], // (dev_boxes + x*5)[4] ); // printf("offset: %d dbbox: %f %f %f %f %f\n", offset, (dev_query_boxes + y*5)[0], // (dev_query_boxes + y*5)[1], (dev_query_boxes + y*5)[2], (dev_query_boxes + y*5)[3], // (dev_query_boxes + y*5)[4] ); dev_overlaps[offset] = devPolyIoU(dev_boxes + x * 5, dev_query_boxes + y * 5); } } void _set_device(int device_id) { int current_device; CUDA_CHECK(cudaGetDevice(&current_device)); if (current_device == device_id) { return; } // The call to cudaSetDevice must come before any calls to Get, which // may perform initialization using the GPU. 
CUDA_CHECK(cudaSetDevice(device_id));
}

void _overlaps(float* overlaps, const float* boxes, const float* query_boxes, int n, int k, int device_id) {
  _set_device(device_id);
  float* overlaps_dev = NULL;
  float* boxes_dev = NULL;
  float* query_boxes_dev = NULL;
  CUDA_CHECK(cudaMalloc(&boxes_dev, n * 5 * sizeof(float)));
  CUDA_CHECK(cudaMemcpy(boxes_dev, boxes, n * 5 * sizeof(float), cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMalloc(&query_boxes_dev, k * 5 * sizeof(float)));
  CUDA_CHECK(cudaMemcpy(query_boxes_dev, query_boxes, k * 5 * sizeof(float), cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMalloc(&overlaps_dev, n * k * sizeof(float)));
  dim3 blocks(DIVUP(n, 32), DIVUP(k, 32));
  dim3 threads(32, 32); // 1024 threads per block
  overlaps_kernel<<<blocks, threads>>>(n, k, boxes_dev, query_boxes_dev, overlaps_dev);
  CUDA_CHECK(cudaMemcpy(overlaps, overlaps_dev, n * k * sizeof(float), cudaMemcpyDeviceToHost));
  CUDA_CHECK(cudaFree(overlaps_dev));
  CUDA_CHECK(cudaFree(boxes_dev));
  CUDA_CHECK(cudaFree(query_boxes_dev));
}
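// Hypothetical host-side usage sketch (not part of the original file). Each rotated box is
// five floats {x_center, y_center, width, height, angle in radians}, matching RotBox2Poly()
// above, and _overlaps() fills an n x k row-major IoU matrix on the selected device. The
// function name and box values below are illustrative only.
void example_overlaps() {
  std::vector<float> boxes   = {10.f, 10.f, 8.f, 4.f, 0.0f};   // n = 1 box
  std::vector<float> queries = {10.f, 10.f, 8.f, 4.f, 0.5f};   // k = 1 query box
  std::vector<float> iou(1 * 1, 0.f);                          // n x k result buffer
  _overlaps(iou.data(), boxes.data(), queries.data(), 1, 1, /*device_id=*/0);
  // iou[0] now holds the rotated IoU of the two boxes
}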
1f2413af4647c527482745bb1a8e45479e905c71.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "fix/fix-nonlinear-kernels-ansi.h" namespace kaldi { namespace fix { template <typename Real> __global__ void _mapping(Real* out, const Real* in, const float x_rang, const int * y_arraybin, const int num_p, const float LB, const float UB, float output_amp, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride; int src_index = i + j * src_stride; if (i < d.cols && j < d.rows) { int pos = (int)((in[src_index] / (2 * x_rang) + 0.5) * num_p); if (pos < 0) out[dst_index]=LB; else if (pos >= num_p) out[dst_index]=UB; else { out[dst_index] = Real(y_arraybin[pos]) / output_amp; } } } void cuda_mapping(const dim3 dimGrid, const dim3 dimBlock, float* data, const float* in, const float x_rang, const int * y_arraybin, const int num_p, const float LB, const float UB, float output_amp, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _mapping) , dim3(dimGrid), dim3(dimBlock), 0, 0, data, in, x_rang, y_arraybin, num_p, LB, UB, output_amp, d, src_stride); } void cuda_mapping(const dim3 dimGrid, const dim3 dimBlock, double* data, const double* in, const float x_rang, const int * y_arraybin, const int num_p, const float LB, const float UB, float output_amp, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _mapping) , dim3(dimGrid), dim3(dimBlock), 0, 0, data, in, x_rang, y_arraybin, num_p, LB, UB, output_amp, d, src_stride); } } }
1f2413af4647c527482745bb1a8e45479e905c71.cu
#include "fix/fix-nonlinear-kernels-ansi.h" namespace kaldi { namespace fix { template <typename Real> __global__ void _mapping(Real* out, const Real* in, const float x_rang, const int * y_arraybin, const int num_p, const float LB, const float UB, float output_amp, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride; int src_index = i + j * src_stride; if (i < d.cols && j < d.rows) { int pos = (int)((in[src_index] / (2 * x_rang) + 0.5) * num_p); if (pos < 0) out[dst_index]=LB; else if (pos >= num_p) out[dst_index]=UB; else { out[dst_index] = Real(y_arraybin[pos]) / output_amp; } } } void cuda_mapping(const dim3 dimGrid, const dim3 dimBlock, float* data, const float* in, const float x_rang, const int * y_arraybin, const int num_p, const float LB, const float UB, float output_amp, MatrixDim d, int src_stride) { _mapping <<<dimGrid, dimBlock>>>(data, in, x_rang, y_arraybin, num_p, LB, UB, output_amp, d, src_stride); } void cuda_mapping(const dim3 dimGrid, const dim3 dimBlock, double* data, const double* in, const float x_rang, const int * y_arraybin, const int num_p, const float LB, const float UB, float output_amp, MatrixDim d, int src_stride) { _mapping <<<dimGrid, dimBlock>>>(data, in, x_rang, y_arraybin, num_p, LB, UB, output_amp, d, src_stride); } } }
4c5821ba89a6d68db96c39f8cf4bf0241bd95766.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Parallel computation
 */
#include <stdio.h>
#include <iostream>
#include<sys/time.h>

using namespace std;

#define N (200)

void add_cpu(int *a, int *b, int *c) {
    int tid = 0;
    while(tid < N) {
        c[tid] = a[tid] + b[tid];
        tid += 1;
        /* code */
    }
}

__global__ void add(int *a, int *b, int *c) {
    int tid = blockIdx.x;
    while(tid < N) {
        c[tid] = a[tid] + b[tid];
        tid += 1;
        /* code */
    }
}

// Sum on the CPU
int main_cpu() {
    int a[N], b[N], c[N];
    struct timeval tv1, tv2;

    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i*i;
    }

    gettimeofday(&tv1, NULL);
    add_cpu(a, b, c);
    gettimeofday(&tv2, NULL);
    float time = (1000000 * (tv2.tv_sec - tv1.tv_sec) + tv2.tv_usec- tv1.tv_usec)/1000.0;
    cout << "time cpu: " << time << "ms, num : " << c[N-1] << endl;
    return 0;
}

// Sum on the GPU
int main(int argc, char const *argv[]) {
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    struct timeval tv1, tv2;

    hipMalloc((void**)&dev_a, N * sizeof(int));
    hipMalloc((void**)&dev_b, N * sizeof(int));
    hipMalloc((void**)&dev_c, N * sizeof(int));

    // Assign values to the arrays a/b on the CPU.
    // There is no particular reason for initializing the input on the CPU; in fact this step
    // would run faster if the arrays were filled on the GPU. But the goal of this code is to
    // show how to add two vectors on the graphics card, so only the computation is placed on
    // the GPU while the input is prepared on the CPU.
    for(unsigned i = 0; i < N; ++i) {
        a[i] = -i;
        b[i] = i*i;
    }

    hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_c, c, N * sizeof(int), hipMemcpyHostToDevice);

    gettimeofday(&tv1, NULL);
    // Launch the kernel. <<<1,1>>> tells the GPU to start 1 thread block with 1 thread per block;
    // <<<256,1>>> would start 256 thread blocks with 1 thread each. In that case a question appears:
    // since the GPU runs N copies of the kernel, how does the code know which thread block is
    // currently running? The answer is in the code:
    //     int tid = blockIdx.x
    // At first glance an undefined variable is assigned to tid, but blockIdx is a built-in
    // variable already defined by the CUDA runtime; its value is the index of the thread block
    // currently executing the device code.
    //
    // Why not simply write int tid = blockIdx? Because CUDA supports two-dimensional grids of
    // thread blocks: for two-dimensional problems such as matrix math or image processing, a 2D
    // index is often much more convenient, since it avoids converting a linear index into a
    // rectangular one.
    hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, dev_a, dev_b, dev_c);
    gettimeofday(&tv2, NULL);
    float time = (1000000 * (tv2.tv_sec - tv1.tv_sec) + tv2.tv_usec- tv1.tv_usec)/1000.0;
    cout << "time gpu: " << time << "ms";

    hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost);
    cout << ", num : " << c[N-1] << endl;

    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);

    main_cpu();
    /* code */
    return 0;
}

// time gpu: 0.048ms
// time cpu: 1.248
4c5821ba89a6d68db96c39f8cf4bf0241bd95766.cu
/**
 * Parallel computation
 */
#include <stdio.h>
#include <iostream>
#include<sys/time.h>

using namespace std;

#define N (200)

void add_cpu(int *a, int *b, int *c) {
    int tid = 0;
    while(tid < N) {
        c[tid] = a[tid] + b[tid];
        tid += 1;
        /* code */
    }
}

__global__ void add(int *a, int *b, int *c) {
    int tid = blockIdx.x;
    while(tid < N) {
        c[tid] = a[tid] + b[tid];
        tid += 1;
        /* code */
    }
}

// Sum on the CPU
int main_cpu() {
    int a[N], b[N], c[N];
    struct timeval tv1, tv2;

    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i*i;
    }

    gettimeofday(&tv1, NULL);
    add_cpu(a, b, c);
    gettimeofday(&tv2, NULL);
    float time = (1000000 * (tv2.tv_sec - tv1.tv_sec) + tv2.tv_usec- tv1.tv_usec)/1000.0;
    cout << "time cpu: " << time << "ms, num : " << c[N-1] << endl;
    return 0;
}

// Sum on the GPU
int main(int argc, char const *argv[]) {
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    struct timeval tv1, tv2;

    cudaMalloc((void**)&dev_a, N * sizeof(int));
    cudaMalloc((void**)&dev_b, N * sizeof(int));
    cudaMalloc((void**)&dev_c, N * sizeof(int));

    // Assign values to the arrays a/b on the CPU.
    // There is no particular reason for initializing the input on the CPU; in fact this step
    // would run faster if the arrays were filled on the GPU. But the goal of this code is to
    // show how to add two vectors on the graphics card, so only the computation is placed on
    // the GPU while the input is prepared on the CPU.
    for(unsigned i = 0; i < N; ++i) {
        a[i] = -i;
        b[i] = i*i;
    }

    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, c, N * sizeof(int), cudaMemcpyHostToDevice);

    gettimeofday(&tv1, NULL);
    // Launch the kernel. <<<1,1>>> tells the GPU to start 1 thread block with 1 thread per block;
    // <<<256,1>>> would start 256 thread blocks with 1 thread each. In that case a question appears:
    // since the GPU runs N copies of the kernel, how does the code know which thread block is
    // currently running? The answer is in the code:
    //     int tid = blockIdx.x
    // At first glance an undefined variable is assigned to tid, but blockIdx is a built-in
    // variable already defined by the CUDA runtime; its value is the index of the thread block
    // currently executing the device code.
    //
    // Why not simply write int tid = blockIdx? Because CUDA supports two-dimensional grids of
    // thread blocks: for two-dimensional problems such as matrix math or image processing, a 2D
    // index is often much more convenient, since it avoids converting a linear index into a
    // rectangular one.
    add<<<1, 1>>>(dev_a, dev_b, dev_c);
    gettimeofday(&tv2, NULL);
    float time = (1000000 * (tv2.tv_sec - tv1.tv_sec) + tv2.tv_usec- tv1.tv_usec)/1000.0;
    cout << "time gpu: " << time << "ms";

    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
    cout << ", num : " << c[N-1] << endl;

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    main_cpu();
    /* code */
    return 0;
}

// time gpu: 0.048ms
// time cpu: 1.248
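// Hypothetical variant (not part of the original file): the same addition using the
// blockIdx/threadIdx indexing discussed in the comments above, extended to many blocks and
// threads with a grid-stride loop so any N is covered. The kernel name and launch shape are
// illustrative only.
__global__ void add_parallel(int *a, int *b, int *c) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;   // global thread index
    while (tid < N) {
        c[tid] = a[tid] + b[tid];
        tid += blockDim.x * gridDim.x;                 // stride by the total number of threads
    }
}
// Example launch: add_parallel<<<(N + 127) / 128, 128>>>(dev_a, dev_b, dev_c);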
1fad0b5d6724f003a0a778c9bdb2e28399db59dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "headers.h" #include <stdio.h> #include <stdlib.h> __constant__ static int c_eN; __constant__ static int c_tPB; __constant__ static int c_ePT; __constant__ static KeyData constant_key; static int filePartSize; static int eN; // elementsNumber, element == 2 array cells static const int mTPB = 512; // maxThreadsPerBlock static const int mBC = 512; // maxBlocksCount static int ePT = 500; // elementsPerThread ADJUSTABLE static int tTN; // totalThreadsNeeded static int bC; // blocks Count CALCULATED: min possible static int tPB; // threads Per Block CALCULATED: max possible static double kernelTime; static hipEvent_t start, stop; void cudaInit(const int _filePartSize, const KeyData * key, const int _ePT) { filePartSize = _filePartSize; ePT = _ePT; if (filePartSize % 2 != 0) { char communicate[200]; sprintf(communicate, "Unable to execute CUDA kernel launch with:\n" "fPS %d\nePT %d\nbC %d\ntPB %d\n" "filePartSize must be divisible by 2", filePartSize, ePT, bC, tPB); showErrAndQuit(communicate); } eN = filePartSize / 2; tTN = (eN + ePT - 1) / ePT; // totalThreadsNeeded bC = (tTN + mTPB - 1) / mTPB; // blocks Count CALCULATED: min possible tPB = (tTN + bC - 1) / bC; // threads Per Block CALCULATED: max possible if (bC > mBC) { char communicate[200]; sprintf(communicate, "Unable to execute CUDA kernel launch with:\n" "fPS %d\nePT %d\nbC %d\ntPB %d\n" "Total threads needed exceeds maximum available (512^2)", filePartSize, ePT, bC, tPB); showErrAndQuit(communicate); } hipMemcpyToSymbol(constant_key, key, sizeof(KeyData)); hipMemcpyToSymbol(c_eN, &eN, sizeof(int)); hipMemcpyToSymbol(c_tPB, &tPB, sizeof(int)); hipMemcpyToSymbol(c_ePT, &ePT, sizeof(int)); hipEventCreate(&start); hipEventCreate(&stop); kernelTime = 0; // explicitly } void cudaPrintStats(int verbose) { hipEventDestroy(start); hipEventDestroy(stop); if (verbose) { printf("total CUDA kernel time:\t%f ms\n", kernelTime); printf("CUDA kernel launches parameters:\n" "eN\t%d\n" "ePT\t%d\n" "tTN\t%d\n" "bC\t%d\n" "tPB\t%d\n", eN, ePT, tTN, bC, tPB); } else { printf("%f\t%d\t%d\t%d\t%d\t%d\t", kernelTime, eN, ePT, tTN, bC, tPB); } } __device__ __inline__ ulong d_F(ulong a) { // ulong FF = 0xFFL; // introduced as hardcoded constant return ((constant_key.sbox[0][(a >> 24) & 0xFFL] + constant_key.sbox[1][(a >> 16) & 0xFFL]) ^ (constant_key.sbox[2][(a >> 8) & 0xFFL])) + constant_key.sbox[3][(a) & 0xFFL]; } __device__ void d_encryptBlock(ulong *l, ulong *r) { int a; for (int i = 0; i < 16; ++i) { *l = (*l) ^ (constant_key.p[i]); *r = d_F(*l) ^ (*r); a = *l; *l = *r; *r = a; } a = *l; *l = *r; *r = a; *r = (*r) ^ (constant_key.p[16]); *l = (*l) ^ (constant_key.p[17]); } __device__ void d_decryptBlock(ulong *l, ulong *r) { int a; *l = (*l) ^ (constant_key.p[17]); *r = (*r) ^ (constant_key.p[16]); a = *l; *l = *r; *r = a; for (int i = 15; i >= 0; --i) { a = *l; *l = *r; *r = a; *r = d_F(*l) ^ (*r); *l = (*l) ^ (constant_key.p[i]); } } __global__ void d_encryptBuffer(uint *buffer) { ulong ll, lr; // const int limit = c_ePT * (c_tPB * blockIdx.x + threadIdx.x + 1); for (int i = c_ePT * (c_tPB * blockIdx.x + threadIdx.x); i < min(c_ePT * (c_tPB * blockIdx.x + threadIdx.x + 1), c_eN); ++i) { ll = (ulong) buffer[2 * i]; lr = (ulong) buffer[2 * i + 1]; d_encryptBlock(&ll, &lr); buffer[2 * i] = (uint) ll; buffer[2 * i + 1] = (uint) lr; } } __global__ void d_decryptBuffer(uint *buffer) { ulong ll, lr; // const int limit = min(c_ePT * (c_tPB * blockIdx.x + threadIdx.x 
+ 1), c_eN); for (int i = c_ePT * (c_tPB * blockIdx.x + threadIdx.x); i < min(c_ePT * (c_tPB * blockIdx.x + threadIdx.x + 1), c_eN); ++i) { ll = (ulong) buffer[2 * i]; lr = (ulong) buffer[2 * i + 1]; d_decryptBlock(&ll, &lr); buffer[2 * i] = (uint) ll; buffer[2 * i + 1] = (uint) lr; } } void cudaEncryptBuffer(const uint * const bufferIn, uint * const bufferOut) { static uint * d_buffer = NULL; if (d_buffer == NULL) { CUDA_CHECK_RETURN(hipMalloc((void**) &d_buffer, sizeof(uint) * filePartSize)); } CUDA_CHECK_RETURN(hipMemcpy(d_buffer, bufferIn, sizeof(uint) * filePartSize, hipMemcpyHostToDevice)); hipEventRecord(start, 0); hipLaunchKernelGGL(( d_encryptBuffer), dim3(bC), dim3(tPB), 0, 0, d_buffer); hipEventRecord(stop, 0); hipEventSynchronize(stop); float time; hipEventElapsedTime(&time, start, stop); kernelTime += (double) time; CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); CUDA_CHECK_RETURN(hipMemcpy(bufferOut, d_buffer, sizeof(int) * filePartSize, hipMemcpyDeviceToHost)); // CUDA_CHECK_RETURN(hipFree((void*) d_buffer)); // FIXME : never frees } void cudaDecryptBuffer(const uint * const bufferIn, uint * const bufferOut) { static uint * d_buffer = NULL; if (d_buffer == NULL) { CUDA_CHECK_RETURN(hipMalloc((void**) &d_buffer, sizeof(uint) * filePartSize)); } CUDA_CHECK_RETURN(hipMemcpy(d_buffer, bufferIn, sizeof(uint) * filePartSize, hipMemcpyHostToDevice)); hipEventRecord(start, 0); hipLaunchKernelGGL(( d_decryptBuffer), dim3(bC), dim3(tPB), 0, 0, d_buffer); hipEventRecord(stop, 0); hipEventSynchronize(stop); float time; hipEventElapsedTime(&time, start, stop); kernelTime += (double) time; CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); CUDA_CHECK_RETURN(hipMemcpy(bufferOut, d_buffer, sizeof(int) * filePartSize, hipMemcpyDeviceToHost)); // CUDA_CHECK_RETURN(hipFree((void*) d_buffer)); // FIXME : never frees }
1fad0b5d6724f003a0a778c9bdb2e28399db59dd.cu
#include "headers.h" #include <stdio.h> #include <stdlib.h> __constant__ static int c_eN; __constant__ static int c_tPB; __constant__ static int c_ePT; __constant__ static KeyData constant_key; static int filePartSize; static int eN; // elementsNumber, element == 2 array cells static const int mTPB = 512; // maxThreadsPerBlock static const int mBC = 512; // maxBlocksCount static int ePT = 500; // elementsPerThread ADJUSTABLE static int tTN; // totalThreadsNeeded static int bC; // blocks Count CALCULATED: min possible static int tPB; // threads Per Block CALCULATED: max possible static double kernelTime; static cudaEvent_t start, stop; void cudaInit(const int _filePartSize, const KeyData * key, const int _ePT) { filePartSize = _filePartSize; ePT = _ePT; if (filePartSize % 2 != 0) { char communicate[200]; sprintf(communicate, "Unable to execute CUDA kernel launch with:\n" "fPS %d\nePT %d\nbC %d\ntPB %d\n" "filePartSize must be divisible by 2", filePartSize, ePT, bC, tPB); showErrAndQuit(communicate); } eN = filePartSize / 2; tTN = (eN + ePT - 1) / ePT; // totalThreadsNeeded bC = (tTN + mTPB - 1) / mTPB; // blocks Count CALCULATED: min possible tPB = (tTN + bC - 1) / bC; // threads Per Block CALCULATED: max possible if (bC > mBC) { char communicate[200]; sprintf(communicate, "Unable to execute CUDA kernel launch with:\n" "fPS %d\nePT %d\nbC %d\ntPB %d\n" "Total threads needed exceeds maximum available (512^2)", filePartSize, ePT, bC, tPB); showErrAndQuit(communicate); } cudaMemcpyToSymbol(constant_key, key, sizeof(KeyData)); cudaMemcpyToSymbol(c_eN, &eN, sizeof(int)); cudaMemcpyToSymbol(c_tPB, &tPB, sizeof(int)); cudaMemcpyToSymbol(c_ePT, &ePT, sizeof(int)); cudaEventCreate(&start); cudaEventCreate(&stop); kernelTime = 0; // explicitly } void cudaPrintStats(int verbose) { cudaEventDestroy(start); cudaEventDestroy(stop); if (verbose) { printf("total CUDA kernel time:\t%f ms\n", kernelTime); printf("CUDA kernel launches parameters:\n" "eN\t%d\n" "ePT\t%d\n" "tTN\t%d\n" "bC\t%d\n" "tPB\t%d\n", eN, ePT, tTN, bC, tPB); } else { printf("%f\t%d\t%d\t%d\t%d\t%d\t", kernelTime, eN, ePT, tTN, bC, tPB); } } __device__ __inline__ ulong d_F(ulong a) { // ulong FF = 0xFFL; // introduced as hardcoded constant return ((constant_key.sbox[0][(a >> 24) & 0xFFL] + constant_key.sbox[1][(a >> 16) & 0xFFL]) ^ (constant_key.sbox[2][(a >> 8) & 0xFFL])) + constant_key.sbox[3][(a) & 0xFFL]; } __device__ void d_encryptBlock(ulong *l, ulong *r) { int a; for (int i = 0; i < 16; ++i) { *l = (*l) ^ (constant_key.p[i]); *r = d_F(*l) ^ (*r); a = *l; *l = *r; *r = a; } a = *l; *l = *r; *r = a; *r = (*r) ^ (constant_key.p[16]); *l = (*l) ^ (constant_key.p[17]); } __device__ void d_decryptBlock(ulong *l, ulong *r) { int a; *l = (*l) ^ (constant_key.p[17]); *r = (*r) ^ (constant_key.p[16]); a = *l; *l = *r; *r = a; for (int i = 15; i >= 0; --i) { a = *l; *l = *r; *r = a; *r = d_F(*l) ^ (*r); *l = (*l) ^ (constant_key.p[i]); } } __global__ void d_encryptBuffer(uint *buffer) { ulong ll, lr; // const int limit = c_ePT * (c_tPB * blockIdx.x + threadIdx.x + 1); for (int i = c_ePT * (c_tPB * blockIdx.x + threadIdx.x); i < min(c_ePT * (c_tPB * blockIdx.x + threadIdx.x + 1), c_eN); ++i) { ll = (ulong) buffer[2 * i]; lr = (ulong) buffer[2 * i + 1]; d_encryptBlock(&ll, &lr); buffer[2 * i] = (uint) ll; buffer[2 * i + 1] = (uint) lr; } } __global__ void d_decryptBuffer(uint *buffer) { ulong ll, lr; // const int limit = min(c_ePT * (c_tPB * blockIdx.x + threadIdx.x + 1), c_eN); for (int i = c_ePT * (c_tPB * blockIdx.x + threadIdx.x); i < 
min(c_ePT * (c_tPB * blockIdx.x + threadIdx.x + 1), c_eN); ++i) { ll = (ulong) buffer[2 * i]; lr = (ulong) buffer[2 * i + 1]; d_decryptBlock(&ll, &lr); buffer[2 * i] = (uint) ll; buffer[2 * i + 1] = (uint) lr; } } void cudaEncryptBuffer(const uint * const bufferIn, uint * const bufferOut) { static uint * d_buffer = NULL; if (d_buffer == NULL) { CUDA_CHECK_RETURN(cudaMalloc((void**) &d_buffer, sizeof(uint) * filePartSize)); } CUDA_CHECK_RETURN(cudaMemcpy(d_buffer, bufferIn, sizeof(uint) * filePartSize, cudaMemcpyHostToDevice)); cudaEventRecord(start, 0); d_encryptBuffer<<<bC, tPB>>>(d_buffer); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float time; cudaEventElapsedTime(&time, start, stop); kernelTime += (double) time; CUDA_CHECK_RETURN(cudaThreadSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); CUDA_CHECK_RETURN(cudaMemcpy(bufferOut, d_buffer, sizeof(int) * filePartSize, cudaMemcpyDeviceToHost)); // CUDA_CHECK_RETURN(cudaFree((void*) d_buffer)); // FIXME : never frees } void cudaDecryptBuffer(const uint * const bufferIn, uint * const bufferOut) { static uint * d_buffer = NULL; if (d_buffer == NULL) { CUDA_CHECK_RETURN(cudaMalloc((void**) &d_buffer, sizeof(uint) * filePartSize)); } CUDA_CHECK_RETURN(cudaMemcpy(d_buffer, bufferIn, sizeof(uint) * filePartSize, cudaMemcpyHostToDevice)); cudaEventRecord(start, 0); d_decryptBuffer<<<bC, tPB>>>(d_buffer); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float time; cudaEventElapsedTime(&time, start, stop); kernelTime += (double) time; CUDA_CHECK_RETURN(cudaThreadSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); CUDA_CHECK_RETURN(cudaMemcpy(bufferOut, d_buffer, sizeof(int) * filePartSize, cudaMemcpyDeviceToHost)); // CUDA_CHECK_RETURN(cudaFree((void*) d_buffer)); // FIXME : never frees }
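// Worked example (my addition, assuming filePartSize = 1000000 uints and ePT = 500) of the
// launch arithmetic performed by cudaInit() above:
//   eN  = filePartSize / 2        = 500000   (one Blowfish block = two 32-bit words)
//   tTN = ceil(eN / ePT)          = 1000     total threads needed
//   bC  = ceil(tTN / mTPB = 512)  = 2        blocks
//   tPB = ceil(tTN / bC)          = 500      threads per block
// The grid then provides bC * tPB = 1000 threads, each encrypting or decrypting up to ePT
// cipher blocks in its per-thread loop.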
48c12b8a0a734f5deb391b600b7f3baaee04f1da.hip
// !!! This is a file automatically generated by hipify!!! #include "kernels.h" #include <ctime> #include <sys/time.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "helper_cuda.h" /***** CUDA Kernels *****/ __global__ void setupRandomVectorGen(hiprandState_t* state, unsigned long seed, unsigned int numElems) { int id = blockDim.x * blockIdx.x + threadIdx.x; if (id >= numElems) return; hiprand_init(seed, id, 0, &(state[id])); } __global__ void runRandomVectorGen(float* vec, hiprandState_t* globalState, float threshold, unsigned int numElems) { int id = blockDim.x * blockIdx.x + threadIdx.x; if (id >= numElems) return; hiprandState_t localState = globalState[id]; float rndVal = hiprand_uniform(&localState); vec[id] = (rndVal * 2 * threshold) - threshold; } __global__ void updateParams(float* params, float* derivatives, float* weights, float learnRate, unsigned int numElems) { int id = blockDim.x * blockIdx.x + threadIdx.x; if (id >= numElems) return; float epsilon = 0.0001; weights[id] += derivatives[id]*derivatives[id]; params[id] -= (learnRate * derivatives[id])/(sqrt(weights[id]) + epsilon); } /***** Kernel Wrappers *****/ void kernelRandomwordVecs(ParamMem_t& params, float threshold) { timeval tim; gettimeofday(&tim, NULL); double t1=tim.tv_sec+(tim.tv_usec/1000000.0); unsigned int blockSize = 1024; unsigned int numElems = params.nWords * params.wordVecDim; unsigned int numBlocks = numElems / blockSize + 1; dim3 threadsPerBlock(blockSize, 1, 1); hiprandState_t* devState; checkCudaErrors(hipMalloc((void**)&devState, numElems*sizeof(hiprandState_t))); hipLaunchKernelGGL(( setupRandomVectorGen), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, devState, time(NULL), numElems); hipLaunchKernelGGL(( runRandomVectorGen), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, params.wordVecs, devState, threshold, numElems); checkCudaErrors(hipFree(devState)); gettimeofday(&tim, NULL); double t2=tim.tv_sec+(tim.tv_usec/1000000.0); printf("Random word vectors time: %f\n", t2-t1); } void kernelUpdateParams(ParamMem_t& params, ParamMem_t& derivatives, ParamMem_t& adagradWeights, float learnRate) { timeval tim; gettimeofday(&tim, NULL); double t1=tim.tv_sec+(tim.tv_usec/1000000.0); unsigned int blockSize = 1024; unsigned int numElems = params.totalSize; unsigned int numBlocks = numElems / blockSize + 1; dim3 threadsPerBlock(blockSize, 1, 1); hipLaunchKernelGGL(( updateParams), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, params.base, derivatives.base, adagradWeights.base, learnRate, params.totalSize); //checkCudaErrors(hipGetLastError()); gettimeofday(&tim, NULL); double t2=tim.tv_sec+(tim.tv_usec/1000000.0); printf("Update params time: %f\n", t2-t1); }
48c12b8a0a734f5deb391b600b7f3baaee04f1da.cu
#include "kernels.h" #include <ctime> #include <sys/time.h> #include <cuda_runtime.h> #include <cublas_v2.h> #include <curand.h> #include <curand_kernel.h> #include "helper_cuda.h" /***** CUDA Kernels *****/ __global__ void setupRandomVectorGen(curandState* state, unsigned long seed, unsigned int numElems) { int id = blockDim.x * blockIdx.x + threadIdx.x; if (id >= numElems) return; curand_init(seed, id, 0, &(state[id])); } __global__ void runRandomVectorGen(float* vec, curandState* globalState, float threshold, unsigned int numElems) { int id = blockDim.x * blockIdx.x + threadIdx.x; if (id >= numElems) return; curandState localState = globalState[id]; float rndVal = curand_uniform(&localState); vec[id] = (rndVal * 2 * threshold) - threshold; } __global__ void updateParams(float* params, float* derivatives, float* weights, float learnRate, unsigned int numElems) { int id = blockDim.x * blockIdx.x + threadIdx.x; if (id >= numElems) return; float epsilon = 0.0001; weights[id] += derivatives[id]*derivatives[id]; params[id] -= (learnRate * derivatives[id])/(sqrt(weights[id]) + epsilon); } /***** Kernel Wrappers *****/ void kernelRandomwordVecs(ParamMem_t& params, float threshold) { timeval tim; gettimeofday(&tim, NULL); double t1=tim.tv_sec+(tim.tv_usec/1000000.0); unsigned int blockSize = 1024; unsigned int numElems = params.nWords * params.wordVecDim; unsigned int numBlocks = numElems / blockSize + 1; dim3 threadsPerBlock(blockSize, 1, 1); curandState* devState; checkCudaErrors(cudaMalloc((void**)&devState, numElems*sizeof(curandState))); setupRandomVectorGen<<<numBlocks, threadsPerBlock>>>(devState, time(NULL), numElems); runRandomVectorGen<<<numBlocks, threadsPerBlock>>>(params.wordVecs, devState, threshold, numElems); checkCudaErrors(cudaFree(devState)); gettimeofday(&tim, NULL); double t2=tim.tv_sec+(tim.tv_usec/1000000.0); printf("Random word vectors time: %f\n", t2-t1); } void kernelUpdateParams(ParamMem_t& params, ParamMem_t& derivatives, ParamMem_t& adagradWeights, float learnRate) { timeval tim; gettimeofday(&tim, NULL); double t1=tim.tv_sec+(tim.tv_usec/1000000.0); unsigned int blockSize = 1024; unsigned int numElems = params.totalSize; unsigned int numBlocks = numElems / blockSize + 1; dim3 threadsPerBlock(blockSize, 1, 1); updateParams<<<numBlocks,threadsPerBlock>>>(params.base, derivatives.base, adagradWeights.base, learnRate, params.totalSize); //checkCudaErrors(cudaGetLastError()); gettimeofday(&tim, NULL); double t2=tim.tv_sec+(tim.tv_usec/1000000.0); printf("Update params time: %f\n", t2-t1); }
cd1234a1df8a7f4c0a649e9420d069c6003a7d24.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <sys/time.h>

float *A,*L,*U,*input;

void arrayInit(int n);
void verifyLU(int n);
void updateLU(int n);
void freemem(int n);

__global__ void scale( float *a, int size, int c)
{
    int index=c,k=0; //size=b
    for(k=index+1;k<size;k++)
    {
        a[size*index + k] = (float) a[size*index + k] / a[size*index + index];
    }
}

__global__ void reduce( float *a, int size, int c)
{
    int tid = blockIdx.x; //Handle the data at the index
    int index=c,j=0; //size=b
    for(j=index+1;j<size;j++)
    {
        a[((tid+index+1)*size + j)] = (float)(a[((tid+index+1)*size + j)] - (float)a[((tid+index+1)*size+index)] * a[((index*size) + j)]);
    }
}

int main(int argc,char **argv){
    float *dev_a;
    int i,p,q,n=0;
    struct timeval bef,aft;
    long duration=0;

    n = atoi(argv[1]); // obtain the size of matrix

    A = (float *) malloc(sizeof(float) * n * n);
    L = (float *) malloc(sizeof(float) * n * n);
    U = (float *) malloc(sizeof(float) * n * n);
    input = (float *) malloc(sizeof(float) * n * n);

    //allocate the memory on the GPU
    hipMalloc ((void**)&dev_a,n * n * sizeof(float));

    arrayInit(n); // initialize the array

    //copy the arrays 'a' and 'b' to the GPU
    hipMemcpy( dev_a, A, n * n * sizeof(float), hipMemcpyHostToDevice);

    gettimeofday(&bef,NULL);
    for(i = 0;i<n;i++)
    {
        hipLaunchKernelGGL(( scale), dim3(1), dim3(1), 0, 0, dev_a, n, i);
        hipLaunchKernelGGL(( reduce), dim3(n-i-1),dim3(1), 0, 0, dev_a, n, i);
    }
    gettimeofday(&aft,NULL);
    duration = aft.tv_sec - bef.tv_sec;
    printf("%ld --- %d \n",duration,n);

    //copy the array 'c' back from the GPU to the CPU
    hipMemcpy( A, dev_a, n * n * sizeof(float),hipMemcpyDeviceToHost );

    //update the array and display the results
    printf("\n");
    updateLU(n);

    //free the memory allocated on the GPU
    hipFree( dev_a );
    return 0;
}

void updateLU(int n)
{
    int i=0,j=0;
    for(i=0;i<n;i++)
    {
        for(j=i+1;j<n;j++)
        {
            U[i*n + j] = A[i*n + j];
        }
    }
    for(i=0;i<n;i++)
    {
        for(j=0;j<i+1;j++)
        {
            L[i*n + j] = A[i*n + j];
        }
    }
    verifyLU(n);
}

void arrayInit(int n)
{
    int i=0,j=0;
    /* Initialize the Random Number Generator*/
    for(i=0;i<n;i++)
    {
        for(j=0;j<n;j++)
        {
            A[i*n + j] = (rand() % 5) + 1.0;
            input[i*n + j] = A[i*n + j];
            L[i*n + j] = 0.0f;
            if(i == j)
            {
                U[i*n + j] = 1.0f;
            }
            else
            {
                U[i*n + j] = 0.0f;
            }
        }
    }
}

/*
 * Performs the Multiplication of Lower and Upper Matricies and verify
 * the result of the reconstructed Matrix.
 */
void verifyLU(int n)
{
    int i=0,j=0,k=0;
    float sum=0,error=0;
    for(i=0;i<n;i++)
    {
        for(j=0;j<n;j++)
        {
            for(k=0;k<n;k++)
            {
                sum += L[i*n + k]*U[k*n + j];
            }
            A[i*n + j] = sum;
            error += input[i*n + j] - A[i*n + j];
            sum=0;
        }
    }
    //printf(" The error is %lf \n",error);

    /* PRINT OUT VERIFIED MATRIX */
    printf("\n REST MATX \n");
    /*
    for(i=0;i<n;i++)
    {
        for(j=0;j<n;j++)
        {
            printf("%lf ",A[i*n + j]);
        }
        printf("\n");
    }
    for(i=0;i<n;i++)
    {
        for(j=0;j<n;j++)
        {
            printf("%lf ",input[i*n + j]);
        }
        printf("\n");
    }
    */
    if(error != error || error < 1 || error > -1)
    {
        printf("Success \n");
    }
}

void freemem(int n)
{
    int i=0;
    for (i = 0; i < n; i++)
    {
        float * pt = A;
        float * ptl = L;
        float * ptu = U;
        free(pt);
        free(ptl);
        free(ptu);
    }
}
cd1234a1df8a7f4c0a649e9420d069c6003a7d24.cu
#include <cuda.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <sys/time.h>

float *A,*L,*U,*input;

void arrayInit(int n);
void verifyLU(int n);
void updateLU(int n);
void freemem(int n);

__global__ void scale( float *a, int size, int c)
{
    int index=c,k=0; //size=b
    for(k=index+1;k<size;k++)
    {
        a[size*index + k] = (float) a[size*index + k] / a[size*index + index];
    }
}

__global__ void reduce( float *a, int size, int c)
{
    int tid = blockIdx.x; //Handle the data at the index
    int index=c,j=0; //size=b
    for(j=index+1;j<size;j++)
    {
        a[((tid+index+1)*size + j)] = (float)(a[((tid+index+1)*size + j)] - (float)a[((tid+index+1)*size+index)] * a[((index*size) + j)]);
    }
}

int main(int argc,char **argv){
    float *dev_a;
    int i,p,q,n=0;
    struct timeval bef,aft;
    long duration=0;

    n = atoi(argv[1]); // obtain the size of matrix

    A = (float *) malloc(sizeof(float) * n * n);
    L = (float *) malloc(sizeof(float) * n * n);
    U = (float *) malloc(sizeof(float) * n * n);
    input = (float *) malloc(sizeof(float) * n * n);

    //allocate the memory on the GPU
    cudaMalloc ((void**)&dev_a,n * n * sizeof(float));

    arrayInit(n); // initialize the array

    //copy the arrays 'a' and 'b' to the GPU
    cudaMemcpy( dev_a, A, n * n * sizeof(float), cudaMemcpyHostToDevice);

    gettimeofday(&bef,NULL);
    for(i = 0;i<n;i++)
    {
        scale<<<1, 1>>> (dev_a, n, i);
        reduce<<<n-i-1,1>>>(dev_a, n, i);
    }
    gettimeofday(&aft,NULL);
    duration = aft.tv_sec - bef.tv_sec;
    printf("%ld --- %d \n",duration,n);

    //copy the array 'c' back from the GPU to the CPU
    cudaMemcpy( A, dev_a, n * n * sizeof(float),cudaMemcpyDeviceToHost );

    //update the array and display the results
    printf("\n");
    updateLU(n);

    //free the memory allocated on the GPU
    cudaFree( dev_a );
    return 0;
}

void updateLU(int n)
{
    int i=0,j=0;
    for(i=0;i<n;i++)
    {
        for(j=i+1;j<n;j++)
        {
            U[i*n + j] = A[i*n + j];
        }
    }
    for(i=0;i<n;i++)
    {
        for(j=0;j<i+1;j++)
        {
            L[i*n + j] = A[i*n + j];
        }
    }
    verifyLU(n);
}

void arrayInit(int n)
{
    int i=0,j=0;
    /* Initialize the Random Number Generator*/
    for(i=0;i<n;i++)
    {
        for(j=0;j<n;j++)
        {
            A[i*n + j] = (rand() % 5) + 1.0;
            input[i*n + j] = A[i*n + j];
            L[i*n + j] = 0.0f;
            if(i == j)
            {
                U[i*n + j] = 1.0f;
            }
            else
            {
                U[i*n + j] = 0.0f;
            }
        }
    }
}

/*
 * Performs the Multiplication of Lower and Upper Matricies and verify
 * the result of the reconstructed Matrix.
 */
void verifyLU(int n)
{
    int i=0,j=0,k=0;
    float sum=0,error=0;
    for(i=0;i<n;i++)
    {
        for(j=0;j<n;j++)
        {
            for(k=0;k<n;k++)
            {
                sum += L[i*n + k]*U[k*n + j];
            }
            A[i*n + j] = sum;
            error += input[i*n + j] - A[i*n + j];
            sum=0;
        }
    }
    //printf(" The error is %lf \n",error);

    /* PRINT OUT VERIFIED MATRIX */
    printf("\n REST MATX \n");
    /*
    for(i=0;i<n;i++)
    {
        for(j=0;j<n;j++)
        {
            printf("%lf ",A[i*n + j]);
        }
        printf("\n");
    }
    for(i=0;i<n;i++)
    {
        for(j=0;j<n;j++)
        {
            printf("%lf ",input[i*n + j]);
        }
        printf("\n");
    }
    */
    if(error != error || error < 1 || error > -1)
    {
        printf("Success \n");
    }
}

void freemem(int n)
{
    int i=0;
    for (i = 0; i < n; i++)
    {
        float * pt = A;
        float * ptl = L;
        float * ptu = U;
        free(pt);
        free(ptl);
        free(ptu);
    }
}
155ea804b81b7c0ca67f34ba660d0e7d72dbe9f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <CudaParticle_Kernel.cuh> #include <double3.h> extern "C" { /****************************************************************************************************/ // collide two spheres using DEM method /****************************************************************************************************/ __device__ double3 collideParticle(double3 posA, double3 posB, double3 velA, double3 velB, double radiusA, double radiusB, double spring, double damping, double shear, double attraction) { // calculate relative position double3 relPos = posB - posA; double dist = length(relPos); double3 force = make_double3(0,0,0); if(dist>0){ float collideDist = (radiusA + radiusB); if (dist < collideDist*2) { double3 norm = relPos / dist; // relative velocity double3 relVel = velB - velA; double3 tanVel = relVel - dot(relVel, norm) * norm; // spring force force = -spring*(collideDist - dist) * norm; // dashpot (damping) force force += damping*relVel; // tangential shear force force += shear*tanVel; // attraction force += attraction*relPos; } } return force; } /****************************************************************************************************/ /****************************************************************************************************/ __global__ void collideParticles(uint nbBodies, double3* oldPos, double3* oldVel, double3* forces, double *radius, double *spring, double *damping, double* shear, double* attraction, partVoisine voisines) { int indexA = __umul24(blockIdx.x,blockDim.x) + threadIdx.x; if(indexA < nbBodies){ for(unsigned int i=0;i<voisines.nbVoisines[indexA];i++){ int indexB = voisines.listeVoisine[(indexA*200)+i]; forces[indexA] = forces[indexA] + collideParticle(oldPos[indexA],oldPos[indexB],oldVel[indexA],oldVel[indexB], radius[indexA],radius[indexB], spring[indexA],damping[indexA],shear[indexA],attraction[indexA]); } } } /****************************************************************************************************/ /****************************************************************************************************/ }
155ea804b81b7c0ca67f34ba660d0e7d72dbe9f0.cu
#include <stdio.h> #include <CudaParticle_Kernel.cuh> #include <double3.h> extern "C" { /****************************************************************************************************/ // collide two spheres using DEM method /****************************************************************************************************/ __device__ double3 collideParticle(double3 posA, double3 posB, double3 velA, double3 velB, double radiusA, double radiusB, double spring, double damping, double shear, double attraction) { // calculate relative position double3 relPos = posB - posA; double dist = length(relPos); double3 force = make_double3(0,0,0); if(dist>0){ float collideDist = (radiusA + radiusB); if (dist < collideDist*2) { double3 norm = relPos / dist; // relative velocity double3 relVel = velB - velA; double3 tanVel = relVel - dot(relVel, norm) * norm; // spring force force = -spring*(collideDist - dist) * norm; // dashpot (damping) force force += damping*relVel; // tangential shear force force += shear*tanVel; // attraction force += attraction*relPos; } } return force; } /****************************************************************************************************/ /****************************************************************************************************/ __global__ void collideParticles(uint nbBodies, double3* oldPos, double3* oldVel, double3* forces, double *radius, double *spring, double *damping, double* shear, double* attraction, partVoisine voisines) { int indexA = __umul24(blockIdx.x,blockDim.x) + threadIdx.x; if(indexA < nbBodies){ for(unsigned int i=0;i<voisines.nbVoisines[indexA];i++){ int indexB = voisines.listeVoisine[(indexA*200)+i]; forces[indexA] = forces[indexA] + collideParticle(oldPos[indexA],oldPos[indexB],oldVel[indexA],oldVel[indexB], radius[indexA],radius[indexB], spring[indexA],damping[indexA],shear[indexA],attraction[indexA]); } } } /****************************************************************************************************/ /****************************************************************************************************/ }
807a36c6c22f3d7dba6f21a046c6dcae079ec486.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file dnn/src/cuda/roi_align/roi_align.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "src/cuda/roi_align/roi_align.cuh" #include <cfloat> #include "megdnn/dtype.h" #include "src/common/roi_align_helper.h" #include "src/cuda/query_blocksize.cuh" #include "src/cuda/utils.cuh" using namespace megdnn; using namespace roi_align; namespace megdnn { namespace cuda { namespace roi_align { #define CUDA_KERNEL_LOOP(vtid, vthreads) \ for (int vtid = blockIdx.x * blockDim.x + threadIdx.x; vtid < vthreads; \ vtid += blockDim.x * gridDim.x) template <typename T, typename Pooler> __global__ void forward_kernel(const int nthreads, const T* bottom_data, const float spatial_scale, const float offset, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sample_height, const int sample_width, const T* bottom_rois, T* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = bottom_rois[1] * spatial_scale - offset; float roi_start_h = bottom_rois[2] * spatial_scale - offset; float roi_end_w = bottom_rois[3] * spatial_scale - offset; float roi_end_h = bottom_rois[4] * spatial_scale - offset; // Force malformed ROIs to be 1x1 float roi_width = max(roi_end_w - roi_start_w, ((float)(0.0))); float roi_height = max(roi_end_h - roi_start_h, ((float)(0.0))); float bin_size_h = static_cast<float>(roi_height) / static_cast<float>(pooled_height); float bin_size_w = static_cast<float>(roi_width) / static_cast<float>(pooled_width); // regularly sample from a sample_height * sample_width grid bottom_data += (roi_batch_ind * channels + c) * height * width; float sample_h_rate = 1.0f / float(sample_height); float sample_w_rate = 1.0f / float(sample_width); float hcenter; float wcenter; Pooler pooler; for (int h_iter = 0; h_iter < sample_height; ++h_iter) { for (int w_iter = 0; w_iter < sample_width; ++w_iter) { hcenter = roi_start_h + bin_size_h * (ph + sample_h_rate * (h_iter + 0.5f)); wcenter = roi_start_w + bin_size_w * (pw + sample_w_rate * (w_iter + 0.5f)); T val = bilinear_interp(bottom_data, hcenter, wcenter, height, width); int idx = h_iter * sample_width + w_iter; pooler.feed(val, idx); } } pooler.writeback_val(top_data[index]); pooler.writeback_idx(argmax_data[index]); } } template <typename T, typename BwdPooler> __global__ void backward_kernel(const int nthreads, const T* top_diff, const T* bottom_rois, const int* argmax_data, const float spatial_scale, const float offset, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sample_height, const int sample_width, T* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = 
(index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = bottom_rois[1] * spatial_scale - offset; float roi_start_h = bottom_rois[2] * spatial_scale - offset; float roi_end_w = bottom_rois[3] * spatial_scale - offset; float roi_end_h = bottom_rois[4] * spatial_scale - offset; // Force malformed ROIs to be 1x1 float roi_width = max(roi_end_w - roi_start_w, ((float)(0.0))); float roi_height = max(roi_end_h - roi_start_h, ((float)(0.0))); float bin_size_h = static_cast<float>(roi_height) / static_cast<float>(pooled_height); float bin_size_w = static_cast<float>(roi_width) / static_cast<float>(pooled_width); // regularly sample from a sample_height * sample_width grid bottom_diff += (roi_batch_ind * channels + c) * height * width; BwdPooler pooler{ph, pw, sample_height, sample_width, height, width, roi_start_h, roi_start_w, bin_size_h, bin_size_w}; pooler.update(index, top_diff, argmax_data, bottom_diff); } } template <typename T, typename Pooler> void forward_proxy(const int nthreads, const T* bottom_data, const float spatial_scale, const float offset, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sample_height, const int sample_width, const T* bottom_rois, T* top_data, int* argmax_data, hipStream_t stream) { int threads_block = query_blocksize_for_kernel(forward_kernel<T, Pooler>); hipLaunchKernelGGL(( forward_kernel<T, Pooler>) , dim3(DIVUP(nthreads, threads_block)), dim3(threads_block), 0, stream, nthreads, bottom_data, spatial_scale, offset, channels, height, width, pooled_height, pooled_width, sample_height, sample_width, bottom_rois, top_data, argmax_data); after_kernel_launch(); } template <typename T, typename BwdPooler> void backward_proxy(const int nthreads, const T* top_diff, const int* argmax_data, const float spatial_scale, const float offset, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sample_height, const int sample_width, const T* bottom_rois, T* bottom_diff, hipStream_t stream) { int threads_block = query_blocksize_for_kernel(backward_kernel<T, BwdPooler>); hipLaunchKernelGGL(( backward_kernel<T, BwdPooler>) , dim3(DIVUP(nthreads, threads_block)), dim3(threads_block), 0, stream, nthreads, top_diff, bottom_rois, argmax_data, spatial_scale, offset, channels, height, width, pooled_height, pooled_width, sample_height, sample_width, bottom_diff); after_kernel_launch(); } #define INST(T) \ template void forward_proxy<T, ::megdnn::roi_align::MaxPooler<T>>( \ const int, const T*, const float, const float, const int, \ const int, const int, const int, const int, const int, const int, \ const T*, T*, int*, hipStream_t); \ template void forward_proxy<T, ::megdnn::roi_align::AveragePooler<T>>( \ const int, const T*, const float, const float, const int, \ const int, const int, const int, const int, const int, const int, \ const T*, T*, int*, hipStream_t); \ template void backward_proxy<T, ::megdnn::roi_align::BwdMaxPooler<T>>( \ const int, const T*, const int*, const float, const float, \ const int, const int, const int, const int, const int, const int, \ const int, const T*, T*, hipStream_t); \ template void backward_proxy<T, ::megdnn::roi_align::BwdAveragePooler<T>>( \ const int, const T*, const int*, const float, const float, \ const int, const int, const int, const int, const int, const int, \ const int, const T*, T*, 
hipStream_t); INST(dt_float32) INST(dt_float16) #undef INST } // namespace roi_align } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
807a36c6c22f3d7dba6f21a046c6dcae079ec486.cu
/** * \file dnn/src/cuda/roi_align/roi_align.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "src/cuda/roi_align/roi_align.cuh" #include <cfloat> #include "megdnn/dtype.h" #include "src/common/roi_align_helper.h" #include "src/cuda/query_blocksize.cuh" #include "src/cuda/utils.cuh" using namespace megdnn; using namespace roi_align; namespace megdnn { namespace cuda { namespace roi_align { #define CUDA_KERNEL_LOOP(vtid, vthreads) \ for (int vtid = blockIdx.x * blockDim.x + threadIdx.x; vtid < vthreads; \ vtid += blockDim.x * gridDim.x) template <typename T, typename Pooler> __global__ void forward_kernel(const int nthreads, const T* bottom_data, const float spatial_scale, const float offset, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sample_height, const int sample_width, const T* bottom_rois, T* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = bottom_rois[1] * spatial_scale - offset; float roi_start_h = bottom_rois[2] * spatial_scale - offset; float roi_end_w = bottom_rois[3] * spatial_scale - offset; float roi_end_h = bottom_rois[4] * spatial_scale - offset; // Force malformed ROIs to be 1x1 float roi_width = max(roi_end_w - roi_start_w, ((float)(0.0))); float roi_height = max(roi_end_h - roi_start_h, ((float)(0.0))); float bin_size_h = static_cast<float>(roi_height) / static_cast<float>(pooled_height); float bin_size_w = static_cast<float>(roi_width) / static_cast<float>(pooled_width); // regularly sample from a sample_height * sample_width grid bottom_data += (roi_batch_ind * channels + c) * height * width; float sample_h_rate = 1.0f / float(sample_height); float sample_w_rate = 1.0f / float(sample_width); float hcenter; float wcenter; Pooler pooler; for (int h_iter = 0; h_iter < sample_height; ++h_iter) { for (int w_iter = 0; w_iter < sample_width; ++w_iter) { hcenter = roi_start_h + bin_size_h * (ph + sample_h_rate * (h_iter + 0.5f)); wcenter = roi_start_w + bin_size_w * (pw + sample_w_rate * (w_iter + 0.5f)); T val = bilinear_interp(bottom_data, hcenter, wcenter, height, width); int idx = h_iter * sample_width + w_iter; pooler.feed(val, idx); } } pooler.writeback_val(top_data[index]); pooler.writeback_idx(argmax_data[index]); } } template <typename T, typename BwdPooler> __global__ void backward_kernel(const int nthreads, const T* top_diff, const T* bottom_rois, const int* argmax_data, const float spatial_scale, const float offset, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sample_height, const int sample_width, T* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / 
pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = bottom_rois[1] * spatial_scale - offset; float roi_start_h = bottom_rois[2] * spatial_scale - offset; float roi_end_w = bottom_rois[3] * spatial_scale - offset; float roi_end_h = bottom_rois[4] * spatial_scale - offset; // Force malformed ROIs to be 1x1 float roi_width = max(roi_end_w - roi_start_w, ((float)(0.0))); float roi_height = max(roi_end_h - roi_start_h, ((float)(0.0))); float bin_size_h = static_cast<float>(roi_height) / static_cast<float>(pooled_height); float bin_size_w = static_cast<float>(roi_width) / static_cast<float>(pooled_width); // regularly sample from a sample_height * sample_width grid bottom_diff += (roi_batch_ind * channels + c) * height * width; BwdPooler pooler{ph, pw, sample_height, sample_width, height, width, roi_start_h, roi_start_w, bin_size_h, bin_size_w}; pooler.update(index, top_diff, argmax_data, bottom_diff); } } template <typename T, typename Pooler> void forward_proxy(const int nthreads, const T* bottom_data, const float spatial_scale, const float offset, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sample_height, const int sample_width, const T* bottom_rois, T* top_data, int* argmax_data, cudaStream_t stream) { int threads_block = query_blocksize_for_kernel(forward_kernel<T, Pooler>); forward_kernel<T, Pooler> <<<DIVUP(nthreads, threads_block), threads_block, 0, stream>>>( nthreads, bottom_data, spatial_scale, offset, channels, height, width, pooled_height, pooled_width, sample_height, sample_width, bottom_rois, top_data, argmax_data); after_kernel_launch(); } template <typename T, typename BwdPooler> void backward_proxy(const int nthreads, const T* top_diff, const int* argmax_data, const float spatial_scale, const float offset, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sample_height, const int sample_width, const T* bottom_rois, T* bottom_diff, cudaStream_t stream) { int threads_block = query_blocksize_for_kernel(backward_kernel<T, BwdPooler>); backward_kernel<T, BwdPooler> <<<DIVUP(nthreads, threads_block), threads_block, 0, stream>>>( nthreads, top_diff, bottom_rois, argmax_data, spatial_scale, offset, channels, height, width, pooled_height, pooled_width, sample_height, sample_width, bottom_diff); after_kernel_launch(); } #define INST(T) \ template void forward_proxy<T, ::megdnn::roi_align::MaxPooler<T>>( \ const int, const T*, const float, const float, const int, \ const int, const int, const int, const int, const int, const int, \ const T*, T*, int*, cudaStream_t); \ template void forward_proxy<T, ::megdnn::roi_align::AveragePooler<T>>( \ const int, const T*, const float, const float, const int, \ const int, const int, const int, const int, const int, const int, \ const T*, T*, int*, cudaStream_t); \ template void backward_proxy<T, ::megdnn::roi_align::BwdMaxPooler<T>>( \ const int, const T*, const int*, const float, const float, \ const int, const int, const int, const int, const int, const int, \ const int, const T*, T*, cudaStream_t); \ template void backward_proxy<T, ::megdnn::roi_align::BwdAveragePooler<T>>( \ const int, const T*, const int*, const float, const float, \ const int, const int, const int, const int, const int, const int, \ const int, const T*, T*, cudaStream_t); INST(dt_float32) INST(dt_float16) #undef INST } // namespace roi_align } // namespace cuda } // namespace megdnn // vim: 
syntax=cpp.doxygen
8829ae90147902d52e145a7bb519bfbfa28d36d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void cu_kron(const float *a, const float* b, float* dst, const int rowsa, const int colsa, const int rowsdst, const int colsdst, const int n){ int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int colsb = colsdst / colsa; int rowsb = rowsdst / rowsa; while(tid < n){ int c2 = tid % colsdst; int r2 = tid / colsdst; int rb = r2 % rowsb; int cb = c2 % colsb; int ra = r2 / rowsb; int ca = c2 / colsb; dst[tid] = a[ra * colsa + ca] * b[rb * colsb + cb]; tid += stride; } }
8829ae90147902d52e145a7bb519bfbfa28d36d7.cu
#include "includes.h" __global__ void cu_kron(const float *a, const float* b, float* dst, const int rowsa, const int colsa, const int rowsdst, const int colsdst, const int n){ int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int colsb = colsdst / colsa; int rowsb = rowsdst / rowsa; while(tid < n){ int c2 = tid % colsdst; int r2 = tid / colsdst; int rb = r2 % rowsb; int cb = c2 % colsb; int ra = r2 / rowsb; int ca = c2 / colsb; dst[tid] = a[ra * colsa + ca] * b[rb * colsb + cb]; tid += stride; } }
9d34c34d3fdce6616929fd4631661b1cd40b67d4.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"

__global__ void HelloFromGPU(void)
{
    printf("Hello World From GPU!\n");
}

extern "C" int Hello()
{
    hipLaunchKernelGGL(( HelloFromGPU) , dim3(2),dim3(2), 0, 0, );
    return 0;
}
9d34c34d3fdce6616929fd4631661b1cd40b67d4.cu
#include <stdio.h> #include "device_launch_parameters.h" #include "cuda_runtime.h" __global__ void HelloFromGPU(void) { printf("Hello World From GPU!\n"); } extern "C" int Hello() { HelloFromGPU <<<2,2>>> (); return 0; }
479c2f37c3cf935d8a78d0674b0052442352094f.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <random> #include <tuple> #include <vector> #include <iostream> #include <ctc.h> #include "test.h" bool small_test() { const int alphabet_size = 5; const int T = 2; std::vector<float> activations = {0.1, 0.6, 0.1, 0.1, 0.1, 0.1, 0.1, 0.6, 0.1, 0.1}; // Calculate the score analytically float expected_score; { std::vector<float> probs(activations.size()); softmax(activations.data(), alphabet_size, T, probs.data()); // Score calculation is specific to the given activations above expected_score = probs[1] * probs[7]; } hipStream_t stream; throw_on_error(hipStreamCreate(&stream), "hipStreamCreate"); float *activations_gpu; throw_on_error(hipMalloc(&activations_gpu, activations.size() * sizeof(float)), "hipMalloc"); throw_on_error(hipMemcpyAsync(activations_gpu, activations.data(), activations.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); std::vector<int> labels = {1, 2}; std::vector<int> label_lengths = {2}; std::vector<int> lengths; lengths.push_back(T); float score; ctcComputeInfo info; info.loc = CTC_GPU; info.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), info, &gpu_alloc_bytes), "Error: get_workspace_size in small_test"); char *ctc_gpu_workspace; throw_on_error(hipMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "hipMalloc"); throw_on_error(compute_ctc_loss(activations_gpu, NULL, labels.data(), label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &score, ctc_gpu_workspace, info), "Error: compute_ctc_loss in small_test"); score = ::exp(-score); const float eps = 1e-6; const float lb = expected_score - eps; const float ub = expected_score + eps; throw_on_error(hipFree(activations_gpu), "hipFree"); throw_on_error(hipFree(ctc_gpu_workspace), "hipFree"); throw_on_error(hipStreamDestroy(stream), "hipStreamDestroy"); return (score > lb && score < ub); } bool inf_test() { const int alphabet_size = 15; const int T = 50; const int L = 10; const int minibatch = 1; std::vector<int> labels = genLabels(alphabet_size, L); labels[0] = 2; std::vector<int> label_lengths = {L}; std::vector<float> acts = genActs(alphabet_size * T * minibatch); for (int i = 0; i < T; ++i) acts[alphabet_size * i + 2] = -1e30; hipStream_t stream; throw_on_error(hipStreamCreate(&stream), "hipStreamCreate"); float *acts_gpu; throw_on_error(hipMalloc(&acts_gpu, acts.size() * sizeof(float)), "hipMalloc"); throw_on_error(hipMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); std::vector<int> lengths; lengths.push_back(T); float *grads_gpu; throw_on_error(hipMalloc(&grads_gpu, (alphabet_size * T) * sizeof(float)), "hipMalloc"); float cost; ctcComputeInfo info; info.loc = CTC_GPU; info.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), info, &gpu_alloc_bytes), "Error: get_workspace_size in inf_test"); char *ctc_gpu_workspace; throw_on_error(hipMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "hipMalloc"); throw_on_error(compute_ctc_loss(acts_gpu, grads_gpu, labels.data(), label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &cost, ctc_gpu_workspace, info), "Error: compute_ctc_loss in inf_test"); bool status = std::isinf(cost); std::vector<float> grads(alphabet_size * T); throw_on_error(hipMemcpyAsync(grads.data(), grads_gpu, grads.size() * 
sizeof(float), hipMemcpyDeviceToHost, stream), "hipMemcpyAsync"); throw_on_error(hipStreamSynchronize(stream), "hipStreamSynchronize"); for (int i = 0; i < alphabet_size * T; ++i) status &= !std::isnan(grads[i]); throw_on_error(hipFree(acts_gpu), "hipFree"); throw_on_error(hipFree(grads_gpu), "hipFree"); throw_on_error(hipFree(ctc_gpu_workspace), "hipFree"); throw_on_error(hipStreamDestroy(stream), "hipStreamDestroy"); return status; } float grad_check(int T, int alphabet_size, std::vector<float>& acts, const std::vector<std::vector<int>>& labels, const std::vector<int>& lengths) { float epsilon = 1e-2; const int minibatch = labels.size(); hipStream_t stream; throw_on_error(hipStreamCreate(&stream), "hipStreamCreate"); float *acts_gpu; throw_on_error(hipMalloc(&acts_gpu, acts.size() * sizeof(float)), "hipMalloc"); throw_on_error(hipMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); std::vector<int> flat_labels; std::vector<int> label_lengths; for (const auto& l : labels) { flat_labels.insert(flat_labels.end(), l.begin(), l.end()); label_lengths.push_back(l.size()); } std::vector<float> costs(minibatch); float *grads_gpu; throw_on_error(hipMalloc(&grads_gpu, acts.size() * sizeof(float)), "hipMalloc"); ctcComputeInfo info; info.loc = CTC_GPU; info.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), info, &gpu_alloc_bytes), "Error: get_workspace_size in grad_check"); char *ctc_gpu_workspace; throw_on_error(hipMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "hipMalloc"); throw_on_error(compute_ctc_loss(acts_gpu, grads_gpu, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costs.data(), ctc_gpu_workspace, info), "Error: compute_ctc_loss (0) in grad_check"); std::vector<float> grads(acts.size()); throw_on_error(hipMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), hipMemcpyDeviceToHost, stream), "hipMemcpyAsync"); throw_on_error(hipStreamSynchronize(stream), "hipStreamSynchronize"); std::vector<float> num_grad(grads.size()); //perform 2nd order central differencing for (int i = 0; i < T * alphabet_size * minibatch; ++i) { acts[i] += epsilon; throw_on_error(hipMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); std::vector<float> costsP1(minibatch); std::vector<float> costsP2(minibatch); throw_on_error(compute_ctc_loss(acts_gpu, NULL, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costsP1.data(), ctc_gpu_workspace, info), "Error: compute_ctc_loss (1) in grad_check"); acts[i] -= 2 * epsilon; throw_on_error(hipMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); throw_on_error(compute_ctc_loss(acts_gpu, NULL, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costsP2.data(), ctc_gpu_workspace, info), "Error: compute_ctc_loss (2) in grad_check"); float costP1 = std::accumulate(costsP1.begin(), costsP1.end(), 0.); float costP2 = std::accumulate(costsP2.begin(), costsP2.end(), 0.); acts[i] += epsilon; num_grad[i] = (costP1 - costP2) / (2 * epsilon); } float diff = rel_diff(grads, num_grad); throw_on_error(hipFree(acts_gpu), "hipFree"); throw_on_error(hipFree(grads_gpu), "hipFree"); throw_on_error(hipFree(ctc_gpu_workspace), "hipFree"); throw_on_error(hipStreamDestroy(stream), "hipStreamDestroy"); return diff; } 
bool run_tests() { std::vector<std::tuple<int, int, int, int, float>> problem_sizes = { std::make_tuple(28, 50, 15, 1, 1e-5), std::make_tuple(5, 10, 5, 65, 1e-4) }; bool status = true; for (auto problem : problem_sizes) { int alphabet_size, T, L, minibatch; float tol; std::tie(alphabet_size, T, L, minibatch, tol) = problem; std::vector<float> acts = genActs(alphabet_size * T * minibatch); std::vector<std::vector<int>> labels; std::vector<int> sizes; for (int mb = 0; mb < minibatch; ++mb) { int actual_length = L; labels.push_back(genLabels(alphabet_size, actual_length)); sizes.push_back(T); } float diff = grad_check(T, alphabet_size, acts, labels, sizes); status &= (diff < tol); } return status; } int main(void) { std::cout << "Running GPU tests" << std::endl; throw_on_error(hipSetDevice(0), "hipSetDevice"); bool status = true; status &= small_test(); status &= inf_test(); status &= run_tests(); if (status) std::cout << "Tests pass" << std::endl; else std::cout << "Some or all tests fail" << std::endl; }
479c2f37c3cf935d8a78d0674b0052442352094f.cu
#include <cmath> #include <random> #include <tuple> #include <vector> #include <iostream> #include <ctc.h> #include "test.h" bool small_test() { const int alphabet_size = 5; const int T = 2; std::vector<float> activations = {0.1, 0.6, 0.1, 0.1, 0.1, 0.1, 0.1, 0.6, 0.1, 0.1}; // Calculate the score analytically float expected_score; { std::vector<float> probs(activations.size()); softmax(activations.data(), alphabet_size, T, probs.data()); // Score calculation is specific to the given activations above expected_score = probs[1] * probs[7]; } cudaStream_t stream; throw_on_error(cudaStreamCreate(&stream), "cudaStreamCreate"); float *activations_gpu; throw_on_error(cudaMalloc(&activations_gpu, activations.size() * sizeof(float)), "cudaMalloc"); throw_on_error(cudaMemcpyAsync(activations_gpu, activations.data(), activations.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<int> labels = {1, 2}; std::vector<int> label_lengths = {2}; std::vector<int> lengths; lengths.push_back(T); float score; ctcComputeInfo info; info.loc = CTC_GPU; info.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), info, &gpu_alloc_bytes), "Error: get_workspace_size in small_test"); char *ctc_gpu_workspace; throw_on_error(cudaMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "cudaMalloc"); throw_on_error(compute_ctc_loss(activations_gpu, NULL, labels.data(), label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &score, ctc_gpu_workspace, info), "Error: compute_ctc_loss in small_test"); score = std::exp(-score); const float eps = 1e-6; const float lb = expected_score - eps; const float ub = expected_score + eps; throw_on_error(cudaFree(activations_gpu), "cudaFree"); throw_on_error(cudaFree(ctc_gpu_workspace), "cudaFree"); throw_on_error(cudaStreamDestroy(stream), "cudaStreamDestroy"); return (score > lb && score < ub); } bool inf_test() { const int alphabet_size = 15; const int T = 50; const int L = 10; const int minibatch = 1; std::vector<int> labels = genLabels(alphabet_size, L); labels[0] = 2; std::vector<int> label_lengths = {L}; std::vector<float> acts = genActs(alphabet_size * T * minibatch); for (int i = 0; i < T; ++i) acts[alphabet_size * i + 2] = -1e30; cudaStream_t stream; throw_on_error(cudaStreamCreate(&stream), "cudaStreamCreate"); float *acts_gpu; throw_on_error(cudaMalloc(&acts_gpu, acts.size() * sizeof(float)), "cudaMalloc"); throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<int> lengths; lengths.push_back(T); float *grads_gpu; throw_on_error(cudaMalloc(&grads_gpu, (alphabet_size * T) * sizeof(float)), "cudaMalloc"); float cost; ctcComputeInfo info; info.loc = CTC_GPU; info.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), info, &gpu_alloc_bytes), "Error: get_workspace_size in inf_test"); char *ctc_gpu_workspace; throw_on_error(cudaMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "cudaMalloc"); throw_on_error(compute_ctc_loss(acts_gpu, grads_gpu, labels.data(), label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &cost, ctc_gpu_workspace, info), "Error: compute_ctc_loss in inf_test"); bool status = std::isinf(cost); std::vector<float> grads(alphabet_size * T); throw_on_error(cudaMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), cudaMemcpyDeviceToHost, 
stream), "cudaMemcpyAsync"); throw_on_error(cudaStreamSynchronize(stream), "cudaStreamSynchronize"); for (int i = 0; i < alphabet_size * T; ++i) status &= !std::isnan(grads[i]); throw_on_error(cudaFree(acts_gpu), "cudaFree"); throw_on_error(cudaFree(grads_gpu), "cudaFree"); throw_on_error(cudaFree(ctc_gpu_workspace), "cudaFree"); throw_on_error(cudaStreamDestroy(stream), "cudaStreamDestroy"); return status; } float grad_check(int T, int alphabet_size, std::vector<float>& acts, const std::vector<std::vector<int>>& labels, const std::vector<int>& lengths) { float epsilon = 1e-2; const int minibatch = labels.size(); cudaStream_t stream; throw_on_error(cudaStreamCreate(&stream), "cudaStreamCreate"); float *acts_gpu; throw_on_error(cudaMalloc(&acts_gpu, acts.size() * sizeof(float)), "cudaMalloc"); throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<int> flat_labels; std::vector<int> label_lengths; for (const auto& l : labels) { flat_labels.insert(flat_labels.end(), l.begin(), l.end()); label_lengths.push_back(l.size()); } std::vector<float> costs(minibatch); float *grads_gpu; throw_on_error(cudaMalloc(&grads_gpu, acts.size() * sizeof(float)), "cudaMalloc"); ctcComputeInfo info; info.loc = CTC_GPU; info.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), info, &gpu_alloc_bytes), "Error: get_workspace_size in grad_check"); char *ctc_gpu_workspace; throw_on_error(cudaMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "cudaMalloc"); throw_on_error(compute_ctc_loss(acts_gpu, grads_gpu, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costs.data(), ctc_gpu_workspace, info), "Error: compute_ctc_loss (0) in grad_check"); std::vector<float> grads(acts.size()); throw_on_error(cudaMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), cudaMemcpyDeviceToHost, stream), "cudaMemcpyAsync"); throw_on_error(cudaStreamSynchronize(stream), "cudaStreamSynchronize"); std::vector<float> num_grad(grads.size()); //perform 2nd order central differencing for (int i = 0; i < T * alphabet_size * minibatch; ++i) { acts[i] += epsilon; throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<float> costsP1(minibatch); std::vector<float> costsP2(minibatch); throw_on_error(compute_ctc_loss(acts_gpu, NULL, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costsP1.data(), ctc_gpu_workspace, info), "Error: compute_ctc_loss (1) in grad_check"); acts[i] -= 2 * epsilon; throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); throw_on_error(compute_ctc_loss(acts_gpu, NULL, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costsP2.data(), ctc_gpu_workspace, info), "Error: compute_ctc_loss (2) in grad_check"); float costP1 = std::accumulate(costsP1.begin(), costsP1.end(), 0.); float costP2 = std::accumulate(costsP2.begin(), costsP2.end(), 0.); acts[i] += epsilon; num_grad[i] = (costP1 - costP2) / (2 * epsilon); } float diff = rel_diff(grads, num_grad); throw_on_error(cudaFree(acts_gpu), "cudaFree"); throw_on_error(cudaFree(grads_gpu), "cudaFree"); throw_on_error(cudaFree(ctc_gpu_workspace), "cudaFree"); throw_on_error(cudaStreamDestroy(stream), "cudaStreamDestroy"); return 
diff; } bool run_tests() { std::vector<std::tuple<int, int, int, int, float>> problem_sizes = { std::make_tuple(28, 50, 15, 1, 1e-5), std::make_tuple(5, 10, 5, 65, 1e-4) }; bool status = true; for (auto problem : problem_sizes) { int alphabet_size, T, L, minibatch; float tol; std::tie(alphabet_size, T, L, minibatch, tol) = problem; std::vector<float> acts = genActs(alphabet_size * T * minibatch); std::vector<std::vector<int>> labels; std::vector<int> sizes; for (int mb = 0; mb < minibatch; ++mb) { int actual_length = L; labels.push_back(genLabels(alphabet_size, actual_length)); sizes.push_back(T); } float diff = grad_check(T, alphabet_size, acts, labels, sizes); status &= (diff < tol); } return status; } int main(void) { std::cout << "Running GPU tests" << std::endl; throw_on_error(cudaSetDevice(0), "cudaSetDevice"); bool status = true; status &= small_test(); status &= inf_test(); status &= run_tests(); if (status) std::cout << "Tests pass" << std::endl; else std::cout << "Some or all tests fail" << std::endl; }
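The tests above wrap every runtime call in throw_on_error, which comes from test.h and is not part of this record. A plausible sketch of the CUDA-error overload is shown below for illustration only; the real helper presumably also accepts the ctcStatus_t codes returned by compute_ctc_loss and get_workspace_size.

// Sketch only: one possible throw_on_error overload matching the usage above.
#include <cuda_runtime.h>
#include <stdexcept>
#include <string>

inline void throw_on_error(cudaError_t err, const char* message)
{
    if (err != cudaSuccess) {
        throw std::runtime_error(std::string(message) + ": " + cudaGetErrorString(err));
    }
}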
783f37a01fcc96dc470305aa3253f5b895b7a545.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/VolumetricDilatedMaxPooling.cu" #else #define UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \ hipLaunchKernelGGL(( cuda_VolumetricDilatedMaxPooling_updateOutput<KW>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \ inputData, inputTime, inputHeight, inputWidth, \ cudaIndices, cudaOutput, kT, kH, dT, dH, dW, padT, padH, padW,\ dilationT, dilationH, dilationW, offsetZ); \ break static inline void THNN_(VolumetricDilatedMaxPooling_shapeCheck)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCIndexTensor *indices, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, bool ceilMode) { int ndim = input->dim(); int inputSlices; int inputTime; int inputHeight; int inputWidth; int outputTime; int outputHeight; int outputWidth; int dimf = 0; int dimt = 1; int dimh = 2; int dimw = 3; THArgCheck(kT > 0 && kW > 0 && kH > 0, 7, "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d", kT, kH, kW); THArgCheck(dT > 0 && dW > 0 && dH > 0, 10, "stride should be greater than zero, but got dT: %d dH: %d dW: %d", dT, dH, dW); THArgCheck(dilationT > 0 && dilationW > 0 && dilationH > 0, 16, "dilation should be greater than 0, but got dilationT: %d dilationH: %d dilationW: %d", dilationT, dilationH, dilationW); if (input->dim() == 5) { dimf++; dimt++; dimh++; dimw++; } if (THCTensor_(nDimension)(state, input) == 4) { /* sizes */ inputSlices = THCTensor_(size)(state, input, 0); inputTime = THCTensor_(size)(state, input, 1); inputHeight = THCTensor_(size)(state, input, 2); inputWidth = THCTensor_(size)(state, input, 3); } else if (THCTensor_(nDimension)(state, input) == 5) { /* sizes */ inputSlices = THCTensor_(size)(state, input, 1); inputTime = THCTensor_(size)(state, input, 2); inputHeight = THCTensor_(size)(state, input, 3); inputWidth = THCTensor_(size)(state, input, 4); } else { AT_ERROR("non-empty 4D or 5D tensor expected, got size: ", input->sizes()); } THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 13, "pad should be smaller than half of kernel size, but got " "kT: %d kW: %d, kH: %d, padT: %d, padW: %d, padH: %d", kT, kW, kH, padT, padW, padH); if (ceilMode) { outputTime = (int)(ceil((float)(inputTime - (dilationT * (kT - 1) + 1) + 2*padT) / dT)) + 1; outputHeight = (int)(ceil((float)(inputHeight - (dilationH * (kH - 1) + 1) + 2*padH) / dH)) + 1; outputWidth = (int)(ceil((float)(inputWidth - (dilationW * (kW - 1) + 1) + 2*padW) / dW)) + 1; } else { outputTime = (int)(floor((float)(inputTime - (dilationT * (kT - 1) + 1) + 2*padT) / dT)) + 1; outputHeight = (int)(floor((float)(inputHeight - (dilationH * (kH - 1) + 1) + 2*padH) / dH)) + 1; outputWidth = (int)(floor((float)(inputWidth - (dilationW * (kW - 1) + 1) + 2*padW) / dW)) + 1; } if (padT || padW || padH) { if ((outputTime - 1)*dT >= inputTime + padT) --outputTime; if ((outputHeight - 1)*dH >= inputHeight + padH) --outputHeight; if ((outputWidth - 1)*dW >= inputWidth + padW) --outputWidth; } if (outputTime < 1 || outputHeight < 1 || outputWidth < 1) THError("Given input size: (%dx%dx%dx%d). Calculated output size: (%dx%dx%dx%d). 
Output size is too small", inputSlices,inputTime,inputHeight,inputWidth,inputSlices,outputTime,outputHeight,outputWidth); if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, ndim, dimf, inputSlices); THCUNN_check_dim_size(state, gradOutput, ndim, dimt, outputTime); THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight); THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth); } if (indices != NULL) { THCUNN_check_dim_size_indices(state, indices, ndim, dimf, inputSlices); THCUNN_check_dim_size_indices(state, indices, ndim, dimt, outputTime); THCUNN_check_dim_size_indices(state, indices, ndim, dimh, outputHeight); THCUNN_check_dim_size_indices(state, indices, ndim, dimw, outputWidth); } } void THNN_(VolumetricDilatedMaxPooling_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCIndexTensor *indices, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, bool ceilMode) { int batchSize; int inputSlices; int inputTime; int inputHeight; int inputWidth; int outputTime; int outputHeight; int outputWidth; int dimt = 1; int dimh = 2; int dimw = 3; int fiveDimensionalInput = THCTensor_(nDimension)(state, input) == 5; if (fiveDimensionalInput) { dimt++; dimh++; dimw++; } THCUNN_assertSameGPU(state, 3, input, indices, output); THNN_(VolumetricDilatedMaxPooling_shapeCheck)( state, input, NULL, NULL, kT, kW, kH, dT, dW, dH, padT, padW, padH, dilationT, dilationW, dilationH, ceilMode); if (THCTensor_(nDimension)(state, input) == 4) { /* sizes */ batchSize = 1; inputSlices = THCTensor_(size)(state, input, 0); inputTime = THCTensor_(size)(state, input, 1); inputHeight = THCTensor_(size)(state, input, 2); inputWidth = THCTensor_(size)(state, input, 3); } else if (fiveDimensionalInput) { /* sizes */ batchSize = THCTensor_(size)(state, input, 0); inputSlices = THCTensor_(size)(state, input, 1); inputTime = THCTensor_(size)(state, input, 2); inputHeight = THCTensor_(size)(state, input, 3); inputWidth = THCTensor_(size)(state, input, 4); } else { AT_ERROR("non-empty 4D or 5D tensor expected, got size: ", input->sizes()); } if (ceilMode) { outputTime = (int)(ceil((float)(inputTime - (dilationT * (kT - 1) + 1) + 2*padT) / dT)) + 1; outputHeight = (int)(ceil((float)(inputHeight - (dilationH * (kH - 1) + 1) + 2*padH) / dH)) + 1; outputWidth = (int)(ceil((float)(inputWidth - (dilationW * (kW - 1) + 1) + 2*padW) / dW)) + 1; } else { outputTime = (int)(floor((float)(inputTime - (dilationT * (kT - 1) + 1) + 2*padT) / dT)) + 1; outputHeight = (int)(floor((float)(inputHeight - (dilationH * (kH - 1) + 1) + 2*padH) / dH)) + 1; outputWidth = (int)(floor((float)(inputWidth - (dilationW * (kW - 1) + 1) + 2*padW) / dW)) + 1; } if (padT || padW || padH) { if ((outputTime - 1)*dT >= inputTime + padT) --outputTime; if ((outputHeight - 1)*dH >= inputHeight + padH) --outputHeight; if ((outputWidth - 1)*dW >= inputWidth + padW) --outputWidth; } if (!fiveDimensionalInput) /* 4D */ { /* resize output */ THCTensor_(resize4d)(state, output, inputSlices, outputTime, outputHeight, outputWidth); /* indices pack ti,i,j locations for each output point as uchar into each float of the tensor */ THCIndexTensor_(resize4d)(state, indices, inputSlices, outputTime, outputHeight, outputWidth); } else { /* 5D */ THCTensor_(resize5d)(state, output, batchSize, inputSlices, outputTime, outputHeight, outputWidth); // Index tensor packs index offsets as uchars into floats THCIndexTensor_(resize5d)(state, indices, batchSize, 
inputSlices, outputTime, outputHeight, outputWidth); fiveDimensionalInput = 1; } input = THCTensor_(newContiguous)(state, input); if (fiveDimensionalInput) { // Collapse batch and feature dimensions output = THCTensor_(newFoldBatchDim)(state, output); THCTensor *old_input = input; input = THCTensor_(newFoldBatchDim)(state, input); THCTensor_(free)(state, old_input); } else { THCTensor_(retain)(state, output); } real* inputData = THCTensor_(data)(state, input); THCDeviceTensor<real, 4> cudaOutput; cudaOutput = toDeviceTensor<real, 4>(state, output); THLongStorage *indicesSize = THLongStorage_newWithSize(4); int64_t indicesSizeRaw[4] = { batchSize * inputSlices, outputTime, outputHeight, outputWidth }; THLongStorage_rawCopy(indicesSize, indicesSizeRaw); THCIndexTensor *indices1 = THCIndexTensor_(newWithStorage)( state, THCIndexTensor_(storage)(state, indices), THCIndexTensor_(storageOffset)(state, indices), indicesSize, NULL); THLongStorage_free(indicesSize); THCDeviceTensor<THCIndex_t, 4> cudaIndices = toDeviceTensor<THCIndex_t, 4>(state, indices1); int totalZ = outputTime * inputSlices * batchSize; int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)), THCCeilDiv(outputHeight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); switch (kW) { UPDATE_OUTPUT_KERNEL_WIDTH(1); UPDATE_OUTPUT_KERNEL_WIDTH(2); UPDATE_OUTPUT_KERNEL_WIDTH(3); UPDATE_OUTPUT_KERNEL_WIDTH(4); UPDATE_OUTPUT_KERNEL_WIDTH(5); UPDATE_OUTPUT_KERNEL_WIDTH(6); UPDATE_OUTPUT_KERNEL_WIDTH(7); default: hipLaunchKernelGGL(( cuda_VolumetricDilatedMaxPooling_updateOutput), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), inputData, inputTime, inputHeight, inputWidth, cudaIndices, cudaOutput, kT, kH, kW, dT, dH, dW, padT, padH, padW, dilationT, dilationH, dilationW, offsetZ); } THCudaCheck(hipGetLastError()); totalZ -= 65535; offsetZ += 65535; } THCTensor_(free)(state, input); THCTensor_(free)(state, output); THCIndexTensor_(free)(state, indices1); } #undef UPDATE_OUTPUT_KERNEL_WIDTH void THNN_(VolumetricDilatedMaxPooling_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCIndexTensor *indices, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, bool ceilMode) { // TODO: gradOutput shape check // Resize and initialize result tensor. 
THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); int batchSize; int inputSlices; int outputTime, outputHeight, outputWidth; int inputTime, inputHeight, inputWidth; int fiveDimensionalInput = THCTensor_(nDimension)(state, input) == 5; THCUNN_assertSameGPU(state, 4, input, indices, gradOutput, gradInput); THNN_(VolumetricDilatedMaxPooling_shapeCheck)( state, input, gradOutput, indices, kT, kW, kH, dT, dW, dH, padT, padW, padH, dilationT, dilationW, dilationH, ceilMode); if (!fiveDimensionalInput) /* 4D */ { batchSize = 1; inputSlices = THCTensor_(size)(state, input, 0); outputTime = THCTensor_(size)(state, gradOutput, 1); outputHeight = THCTensor_(size)(state, gradOutput, 2); outputWidth = THCTensor_(size)(state, gradOutput, 3); inputTime = THCTensor_(size)(state, gradInput, 1); inputHeight = THCTensor_(size)(state, gradInput, 2); inputWidth = THCTensor_(size)(state, gradInput, 3); } else { batchSize = THCTensor_(size)(state, input, 0); inputSlices = THCTensor_(size)(state, input, 1); outputTime = THCTensor_(size)(state, gradOutput, 2); outputHeight = THCTensor_(size)(state, gradOutput, 3); outputWidth = THCTensor_(size)(state, gradOutput, 4); inputTime = THCTensor_(size)(state, gradInput, 2); inputHeight = THCTensor_(size)(state, gradInput, 3); inputWidth = THCTensor_(size)(state, gradInput, 4); } gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (fiveDimensionalInput) { // Collapse batch and feature dimensions gradInput = THCTensor_(newFoldBatchDim)(state, gradInput); THCTensor *old_gradOutput = gradOutput; gradOutput = THCTensor_(newFoldBatchDim)(state, gradOutput); THCTensor_(free)(state, old_gradOutput); } else { THCTensor_(retain)(state, gradInput); } THCDeviceTensor<real, 4> cudaGradOutput; cudaGradOutput = toDeviceTensor<real, 4>(state, gradOutput); real* gradInputData = THCTensor_(data)(state, gradInput); THLongStorage *indicesSize = THLongStorage_newWithSize(4); int64_t indicesSizeRaw[4] = { batchSize * inputSlices, outputTime, outputHeight, outputWidth }; THLongStorage_rawCopy(indicesSize, indicesSizeRaw); THCIndexTensor *indices1 = THCIndexTensor_(newWithStorage)( state, THCIndexTensor_(storage)(state, indices), THCIndexTensor_(storageOffset)(state, indices), indicesSize, NULL); THLongStorage_free(indicesSize); THCDeviceTensor<THCIndex_t, 4> cudaIndices = toDeviceTensor<THCIndex_t, 4>(state, indices1); int64_t totalZ = outputTime * inputSlices * batchSize; int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)), THCCeilDiv(outputHeight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); hipLaunchKernelGGL(( cuda_VolumetricDilatedMaxPooling_updateGradInput), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), cudaGradOutput, cudaIndices, gradInputData, inputTime, inputHeight, inputWidth, dT, dH, dW, padT, padH, padW, dilationT, dilationH, dilationW, offsetZ); THCudaCheck(hipGetLastError()); totalZ -= 65535; offsetZ += 65535; } // cleanup THCTensor_(free)(state, gradInput); THCTensor_(free)(state, gradOutput); THCIndexTensor_(free)(state, indices1); } #endif
783f37a01fcc96dc470305aa3253f5b895b7a545.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/VolumetricDilatedMaxPooling.cu" #else #define UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \ cuda_VolumetricDilatedMaxPooling_updateOutput<KW> \ <<<grid, block, 0, THCState_getCurrentStream(state)>>>( \ inputData, inputTime, inputHeight, inputWidth, \ cudaIndices, cudaOutput, kT, kH, dT, dH, dW, padT, padH, padW,\ dilationT, dilationH, dilationW, offsetZ); \ break static inline void THNN_(VolumetricDilatedMaxPooling_shapeCheck)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCIndexTensor *indices, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, bool ceilMode) { int ndim = input->dim(); int inputSlices; int inputTime; int inputHeight; int inputWidth; int outputTime; int outputHeight; int outputWidth; int dimf = 0; int dimt = 1; int dimh = 2; int dimw = 3; THArgCheck(kT > 0 && kW > 0 && kH > 0, 7, "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d", kT, kH, kW); THArgCheck(dT > 0 && dW > 0 && dH > 0, 10, "stride should be greater than zero, but got dT: %d dH: %d dW: %d", dT, dH, dW); THArgCheck(dilationT > 0 && dilationW > 0 && dilationH > 0, 16, "dilation should be greater than 0, but got dilationT: %d dilationH: %d dilationW: %d", dilationT, dilationH, dilationW); if (input->dim() == 5) { dimf++; dimt++; dimh++; dimw++; } if (THCTensor_(nDimension)(state, input) == 4) { /* sizes */ inputSlices = THCTensor_(size)(state, input, 0); inputTime = THCTensor_(size)(state, input, 1); inputHeight = THCTensor_(size)(state, input, 2); inputWidth = THCTensor_(size)(state, input, 3); } else if (THCTensor_(nDimension)(state, input) == 5) { /* sizes */ inputSlices = THCTensor_(size)(state, input, 1); inputTime = THCTensor_(size)(state, input, 2); inputHeight = THCTensor_(size)(state, input, 3); inputWidth = THCTensor_(size)(state, input, 4); } else { AT_ERROR("non-empty 4D or 5D tensor expected, got size: ", input->sizes()); } THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 13, "pad should be smaller than half of kernel size, but got " "kT: %d kW: %d, kH: %d, padT: %d, padW: %d, padH: %d", kT, kW, kH, padT, padW, padH); if (ceilMode) { outputTime = (int)(ceil((float)(inputTime - (dilationT * (kT - 1) + 1) + 2*padT) / dT)) + 1; outputHeight = (int)(ceil((float)(inputHeight - (dilationH * (kH - 1) + 1) + 2*padH) / dH)) + 1; outputWidth = (int)(ceil((float)(inputWidth - (dilationW * (kW - 1) + 1) + 2*padW) / dW)) + 1; } else { outputTime = (int)(floor((float)(inputTime - (dilationT * (kT - 1) + 1) + 2*padT) / dT)) + 1; outputHeight = (int)(floor((float)(inputHeight - (dilationH * (kH - 1) + 1) + 2*padH) / dH)) + 1; outputWidth = (int)(floor((float)(inputWidth - (dilationW * (kW - 1) + 1) + 2*padW) / dW)) + 1; } if (padT || padW || padH) { if ((outputTime - 1)*dT >= inputTime + padT) --outputTime; if ((outputHeight - 1)*dH >= inputHeight + padH) --outputHeight; if ((outputWidth - 1)*dW >= inputWidth + padW) --outputWidth; } if (outputTime < 1 || outputHeight < 1 || outputWidth < 1) THError("Given input size: (%dx%dx%dx%d). Calculated output size: (%dx%dx%dx%d). 
Output size is too small", inputSlices,inputTime,inputHeight,inputWidth,inputSlices,outputTime,outputHeight,outputWidth); if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, ndim, dimf, inputSlices); THCUNN_check_dim_size(state, gradOutput, ndim, dimt, outputTime); THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight); THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth); } if (indices != NULL) { THCUNN_check_dim_size_indices(state, indices, ndim, dimf, inputSlices); THCUNN_check_dim_size_indices(state, indices, ndim, dimt, outputTime); THCUNN_check_dim_size_indices(state, indices, ndim, dimh, outputHeight); THCUNN_check_dim_size_indices(state, indices, ndim, dimw, outputWidth); } } void THNN_(VolumetricDilatedMaxPooling_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCIndexTensor *indices, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, bool ceilMode) { int batchSize; int inputSlices; int inputTime; int inputHeight; int inputWidth; int outputTime; int outputHeight; int outputWidth; int dimt = 1; int dimh = 2; int dimw = 3; int fiveDimensionalInput = THCTensor_(nDimension)(state, input) == 5; if (fiveDimensionalInput) { dimt++; dimh++; dimw++; } THCUNN_assertSameGPU(state, 3, input, indices, output); THNN_(VolumetricDilatedMaxPooling_shapeCheck)( state, input, NULL, NULL, kT, kW, kH, dT, dW, dH, padT, padW, padH, dilationT, dilationW, dilationH, ceilMode); if (THCTensor_(nDimension)(state, input) == 4) { /* sizes */ batchSize = 1; inputSlices = THCTensor_(size)(state, input, 0); inputTime = THCTensor_(size)(state, input, 1); inputHeight = THCTensor_(size)(state, input, 2); inputWidth = THCTensor_(size)(state, input, 3); } else if (fiveDimensionalInput) { /* sizes */ batchSize = THCTensor_(size)(state, input, 0); inputSlices = THCTensor_(size)(state, input, 1); inputTime = THCTensor_(size)(state, input, 2); inputHeight = THCTensor_(size)(state, input, 3); inputWidth = THCTensor_(size)(state, input, 4); } else { AT_ERROR("non-empty 4D or 5D tensor expected, got size: ", input->sizes()); } if (ceilMode) { outputTime = (int)(ceil((float)(inputTime - (dilationT * (kT - 1) + 1) + 2*padT) / dT)) + 1; outputHeight = (int)(ceil((float)(inputHeight - (dilationH * (kH - 1) + 1) + 2*padH) / dH)) + 1; outputWidth = (int)(ceil((float)(inputWidth - (dilationW * (kW - 1) + 1) + 2*padW) / dW)) + 1; } else { outputTime = (int)(floor((float)(inputTime - (dilationT * (kT - 1) + 1) + 2*padT) / dT)) + 1; outputHeight = (int)(floor((float)(inputHeight - (dilationH * (kH - 1) + 1) + 2*padH) / dH)) + 1; outputWidth = (int)(floor((float)(inputWidth - (dilationW * (kW - 1) + 1) + 2*padW) / dW)) + 1; } if (padT || padW || padH) { if ((outputTime - 1)*dT >= inputTime + padT) --outputTime; if ((outputHeight - 1)*dH >= inputHeight + padH) --outputHeight; if ((outputWidth - 1)*dW >= inputWidth + padW) --outputWidth; } if (!fiveDimensionalInput) /* 4D */ { /* resize output */ THCTensor_(resize4d)(state, output, inputSlices, outputTime, outputHeight, outputWidth); /* indices pack ti,i,j locations for each output point as uchar into each float of the tensor */ THCIndexTensor_(resize4d)(state, indices, inputSlices, outputTime, outputHeight, outputWidth); } else { /* 5D */ THCTensor_(resize5d)(state, output, batchSize, inputSlices, outputTime, outputHeight, outputWidth); // Index tensor packs index offsets as uchars into floats THCIndexTensor_(resize5d)(state, indices, batchSize, 
inputSlices, outputTime, outputHeight, outputWidth); fiveDimensionalInput = 1; } input = THCTensor_(newContiguous)(state, input); if (fiveDimensionalInput) { // Collapse batch and feature dimensions output = THCTensor_(newFoldBatchDim)(state, output); THCTensor *old_input = input; input = THCTensor_(newFoldBatchDim)(state, input); THCTensor_(free)(state, old_input); } else { THCTensor_(retain)(state, output); } real* inputData = THCTensor_(data)(state, input); THCDeviceTensor<real, 4> cudaOutput; cudaOutput = toDeviceTensor<real, 4>(state, output); THLongStorage *indicesSize = THLongStorage_newWithSize(4); int64_t indicesSizeRaw[4] = { batchSize * inputSlices, outputTime, outputHeight, outputWidth }; THLongStorage_rawCopy(indicesSize, indicesSizeRaw); THCIndexTensor *indices1 = THCIndexTensor_(newWithStorage)( state, THCIndexTensor_(storage)(state, indices), THCIndexTensor_(storageOffset)(state, indices), indicesSize, NULL); THLongStorage_free(indicesSize); THCDeviceTensor<THCIndex_t, 4> cudaIndices = toDeviceTensor<THCIndex_t, 4>(state, indices1); int totalZ = outputTime * inputSlices * batchSize; int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)), THCCeilDiv(outputHeight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); switch (kW) { UPDATE_OUTPUT_KERNEL_WIDTH(1); UPDATE_OUTPUT_KERNEL_WIDTH(2); UPDATE_OUTPUT_KERNEL_WIDTH(3); UPDATE_OUTPUT_KERNEL_WIDTH(4); UPDATE_OUTPUT_KERNEL_WIDTH(5); UPDATE_OUTPUT_KERNEL_WIDTH(6); UPDATE_OUTPUT_KERNEL_WIDTH(7); default: cuda_VolumetricDilatedMaxPooling_updateOutput<<<grid, block, 0, THCState_getCurrentStream(state)>>>( inputData, inputTime, inputHeight, inputWidth, cudaIndices, cudaOutput, kT, kH, kW, dT, dH, dW, padT, padH, padW, dilationT, dilationH, dilationW, offsetZ); } THCudaCheck(cudaGetLastError()); totalZ -= 65535; offsetZ += 65535; } THCTensor_(free)(state, input); THCTensor_(free)(state, output); THCIndexTensor_(free)(state, indices1); } #undef UPDATE_OUTPUT_KERNEL_WIDTH void THNN_(VolumetricDilatedMaxPooling_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCIndexTensor *indices, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, bool ceilMode) { // TODO: gradOutput shape check // Resize and initialize result tensor. 
THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); int batchSize; int inputSlices; int outputTime, outputHeight, outputWidth; int inputTime, inputHeight, inputWidth; int fiveDimensionalInput = THCTensor_(nDimension)(state, input) == 5; THCUNN_assertSameGPU(state, 4, input, indices, gradOutput, gradInput); THNN_(VolumetricDilatedMaxPooling_shapeCheck)( state, input, gradOutput, indices, kT, kW, kH, dT, dW, dH, padT, padW, padH, dilationT, dilationW, dilationH, ceilMode); if (!fiveDimensionalInput) /* 4D */ { batchSize = 1; inputSlices = THCTensor_(size)(state, input, 0); outputTime = THCTensor_(size)(state, gradOutput, 1); outputHeight = THCTensor_(size)(state, gradOutput, 2); outputWidth = THCTensor_(size)(state, gradOutput, 3); inputTime = THCTensor_(size)(state, gradInput, 1); inputHeight = THCTensor_(size)(state, gradInput, 2); inputWidth = THCTensor_(size)(state, gradInput, 3); } else { batchSize = THCTensor_(size)(state, input, 0); inputSlices = THCTensor_(size)(state, input, 1); outputTime = THCTensor_(size)(state, gradOutput, 2); outputHeight = THCTensor_(size)(state, gradOutput, 3); outputWidth = THCTensor_(size)(state, gradOutput, 4); inputTime = THCTensor_(size)(state, gradInput, 2); inputHeight = THCTensor_(size)(state, gradInput, 3); inputWidth = THCTensor_(size)(state, gradInput, 4); } gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (fiveDimensionalInput) { // Collapse batch and feature dimensions gradInput = THCTensor_(newFoldBatchDim)(state, gradInput); THCTensor *old_gradOutput = gradOutput; gradOutput = THCTensor_(newFoldBatchDim)(state, gradOutput); THCTensor_(free)(state, old_gradOutput); } else { THCTensor_(retain)(state, gradInput); } THCDeviceTensor<real, 4> cudaGradOutput; cudaGradOutput = toDeviceTensor<real, 4>(state, gradOutput); real* gradInputData = THCTensor_(data)(state, gradInput); THLongStorage *indicesSize = THLongStorage_newWithSize(4); int64_t indicesSizeRaw[4] = { batchSize * inputSlices, outputTime, outputHeight, outputWidth }; THLongStorage_rawCopy(indicesSize, indicesSizeRaw); THCIndexTensor *indices1 = THCIndexTensor_(newWithStorage)( state, THCIndexTensor_(storage)(state, indices), THCIndexTensor_(storageOffset)(state, indices), indicesSize, NULL); THLongStorage_free(indicesSize); THCDeviceTensor<THCIndex_t, 4> cudaIndices = toDeviceTensor<THCIndex_t, 4>(state, indices1); int64_t totalZ = outputTime * inputSlices * batchSize; int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)), THCCeilDiv(outputHeight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); cuda_VolumetricDilatedMaxPooling_updateGradInput<<<grid, block, 0, THCState_getCurrentStream(state)>>>( cudaGradOutput, cudaIndices, gradInputData, inputTime, inputHeight, inputWidth, dT, dH, dW, padT, padH, padW, dilationT, dilationH, dilationW, offsetZ); THCudaCheck(cudaGetLastError()); totalZ -= 65535; offsetZ += 65535; } // cleanup THCTensor_(free)(state, gradInput); THCTensor_(free)(state, gradOutput); THCIndexTensor_(free)(state, indices1); } #endif
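Both copies of the pooling file above derive the output extent from the same ceil/floor expression and then shrink it when the last window would start entirely inside the padding. The helper below restates that computation for a single dimension; it is a sketch written for this note (the name pooled_extent is invented), not code taken from either file.

#include <cmath>

// Output extent of a dilated max pooling along one dimension.
// inSize: input extent, k: kernel size, d: stride, pad: padding,
// dil: dilation, ceilMode: round up instead of down.
static int pooled_extent(int inSize, int k, int d, int pad, int dil, bool ceilMode)
{
    const float span = (float)(inSize - (dil * (k - 1) + 1) + 2 * pad);
    int out = (int)(ceilMode ? std::ceil(span / d) : std::floor(span / d)) + 1;
    // With padding, make sure the last pooling window starts inside the
    // (padded) input, mirroring the adjustment in the files above
    // (applied here per dimension).
    if (pad && (out - 1) * d >= inSize + pad) --out;
    return out;
}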
9eca51340513bdd76f98f1a3a775d145dddae19f.hip
// !!! This is a file automatically generated by hipify!!! /*------------------------------------------------------------------------- * * CUDA functions for texture-memory interpolation based projection * * This file has the necesary functions to perform X-ray parallel projection * operation given a geaometry, angles and image. It uses the 3D texture * memory linear interpolation to uniformily sample a path to integrate the * X-rays. * * CODE by Ander Biguri * --------------------------------------------------------------------------- --------------------------------------------------------------------------- Copyright (c) 2015, University of Bath and CERN- European Organization for Nuclear Research All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------- Contact: [email protected] Codes : https://github.com/CERN/TIGRE --------------------------------------------------------------------------- */ #include <algorithm> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "ray_interpolated_projection_parallel.hpp" #include "mex.h" #include <math.h> #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ mexPrintf("%s \n",msg);\ mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\ } \ } while (0) // Declare the texture reference. 
texture<float, hipTextureType3D , hipReadModeElementType> tex; #define MAXTREADS 1024 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * --->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ __global__ void kernelPixelDetector_parallel( Geometry geo, float* detector, Point3D source , Point3D deltaU, Point3D deltaV, Point3D uvOrigin, float maxdist){ unsigned long y = blockIdx.y * blockDim.y + threadIdx.y; unsigned long x = blockIdx.x * blockDim.x + threadIdx.x; unsigned long idx = x * geo.nDetecV + y; if ((x>= geo.nDetecU) | (y>= geo.nDetecV)) return; /////// Get coordinates XYZ of pixel UV int pixelV = geo.nDetecV-y-1; int pixelU = x; float vectX,vectY,vectZ; Point3D P; P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x); P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y); P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z); Point3D S; S.x=(source.x+pixelU*deltaU.x+pixelV*deltaV.x); S.y=(source.y+pixelU*deltaU.y+pixelV*deltaV.y); S.z=(source.z+pixelU*deltaU.z+pixelV*deltaV.z); // Length is the ray length in normalized space double length=sqrt((S.x-P.x)*(S.x-P.x)+(S.y-P.y)*(S.y-P.y)+(S.z-P.z)*(S.z-P.z)); //now legth is an integer of Nsamples that are required on this line length=ceil(length/geo.accuracy);//Divide the directional vector by an integer vectX=(P.x -S.x)/(length); vectY=(P.y -S.y)/(length); vectZ=(P.z -S.z)/(length); // //Integrate over the line float tx,ty,tz; float sum=0; float i; // limit the amount of mem access after the cube, but before the detector. if ((geo.DSO/geo.dVoxelX+maxdist)/geo.accuracy < length) length=ceil((geo.DSO/geo.dVoxelX+maxdist)/geo.accuracy); //Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel") for (i=floor(maxdist/geo.accuracy); i<=length; i=i+1){ tx=vectX*i+S.x; ty=vectY*i+S.y; tz=vectZ*i+S.z; sum += tex3D(tex, tx+0.5, ty+0.5, tz+0.5); // this line is 94% of time. } float deltalength=sqrt((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+ (vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) ); detector[idx]=sum*deltalength; } int interpolation_projection_parallel(float const * const img, Geometry geo, float** result,float const * const alphas,int nalpha){ // copy data to CUDA memory hipArray *d_imagedata = 0; const hipExtent extent = make_hipExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); hipMalloc3DArray(&d_imagedata, &channelDesc, extent); cudaCheckErrors("hipMalloc3D error 3D tex"); hipMemcpy3DParms copyParams = { 0 }; copyParams.srcPtr = make_hipPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_imagedata; copyParams.extent = extent; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams); cudaCheckErrors("hipMemcpy3D fail"); // Configure texture options tex.normalized = false; tex.filterMode = hipFilterModeLinear; tex.addressMode[0] = hipAddressModeBorder; tex.addressMode[1] = hipAddressModeBorder; tex.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex, d_imagedata, channelDesc); cudaCheckErrors("3D texture memory bind fail"); //Done! Image put into texture memory. 
size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float); float* dProjection; hipMalloc((void**)&dProjection, num_bytes); cudaCheckErrors("hipMalloc fail"); // If we are going to time bool timekernel=false; hipEvent_t start, stop; float elapsedTime; if (timekernel){ hipEventCreate(&start); hipEventRecord(start,0); } // 16x16 gave the best performance empirically // Funnily that makes it compatible with most GPUs..... dim3 grid(ceil((float)geo.nDetecU/32),ceil((float)geo.nDetecV/32),1); dim3 block(32,32,1); Point3D source, deltaU, deltaV, uvOrigin; float maxdist; for (unsigned int i=0;i<nalpha;i++){ geo.alpha=alphas[i]; //precomute distances for faster execution maxdist=maxDistanceCubeXY(geo,geo.alpha,i); //Precompute per angle constant stuff for speed computeDeltas_parallel(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source); //Interpolation!! hipLaunchKernelGGL(( kernelPixelDetector_parallel), dim3(grid),dim3(block), 0, 0, geo,dProjection, source, deltaU, deltaV, uvOrigin,floor(maxdist)); cudaCheckErrors("Kernel fail"); // copy result to host hipMemcpy(result[i], dProjection, num_bytes, hipMemcpyDeviceToHost); cudaCheckErrors("hipMemcpy fail"); } if (timekernel){ hipEventCreate(&stop); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start,stop); mexPrintf("%f\n" ,elapsedTime); } hipUnbindTexture(tex); cudaCheckErrors("Unbind fail"); hipFree(dProjection); hipFreeArray(d_imagedata); cudaCheckErrors("hipFree d_imagedata fail"); hipDeviceReset(); return 0; } /* This code precomputes The location of the source and the Delta U and delta V (in the warped space) * to compute the locations of the x-rays. While it seems verbose and overly-optimized, * it does saves about 30% of each of the kernel calls. Thats something! **/ void computeDeltas_parallel(Geometry geo, float alpha,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){ Point3D S; S.x=geo.DSO; S.y=geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); S.z=geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); //End point Point3D P,Pu0,Pv0; P.x =-(geo.DSD-geo.DSO); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pu0.x=-(geo.DSD-geo.DSO); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pv0.x=-(geo.DSD-geo.DSO); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1); // Geomtric trasnformations: //1: Offset detector //P.x P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i]; Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i]; Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i]; //S doesnt need to chagne //3: Rotate (around z)! 
Point3D Pfinal, Pfinalu0, Pfinalv0; Pfinal.x =P.x*cos(geo.alpha)-P.y*sin(geo.alpha); Pfinal.y =P.y*cos(geo.alpha)+P.x*sin(geo.alpha); Pfinal.z =P.z; Pfinalu0.x=Pu0.x*cos(geo.alpha)-Pu0.y*sin(geo.alpha); Pfinalu0.y=Pu0.y*cos(geo.alpha)+Pu0.x*sin(geo.alpha); Pfinalu0.z=Pu0.z; Pfinalv0.x=Pv0.x*cos(geo.alpha)-Pv0.y*sin(geo.alpha); Pfinalv0.y=Pv0.y*cos(geo.alpha)+Pv0.x*sin(geo.alpha); Pfinalv0.z=Pv0.z; Point3D S2; S2.x=S.x*cos(geo.alpha)-S.y*sin(geo.alpha); S2.y=S.y*cos(geo.alpha)+S.x*sin(geo.alpha); S2.z=S.z; //2: Offset image (instead of offseting image, -offset everything else) Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i]; Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i]; Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i]; S2.x=S2.x-geo.offOrigX[i]; S2.y=S2.y-geo.offOrigY[i]; S2.z=S2.z-geo.offOrigZ[i]; // As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation); Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2; Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2; Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2; S2.x =S2.x+geo.sVoxelX/2-geo.dVoxelX/2; S2.y =S2.y+geo.sVoxelY/2-geo.dVoxelY/2; S2.z =S2.z +geo.sVoxelZ/2-geo.dVoxelZ/2; //4. Scale everything so dVoxel==1 Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ; Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ; Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ; S2.x =S2.x/geo.dVoxelX; S2.y =S2.y/geo.dVoxelY; S2.z =S2.z/geo.dVoxelZ; //5. apply COR. Wherever everything was, now its offesetd by a bit float CORx, CORy; CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX; CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY; Pfinal.x+=CORx; Pfinal.y+=CORy; Pfinalu0.x+=CORx; Pfinalu0.y+=CORy; Pfinalv0.x+=CORx; Pfinalv0.y+=CORy; S2.x+=CORx; S2.y+=CORy; // return *uvorigin=Pfinal; deltaU->x=Pfinalu0.x-Pfinal.x; deltaU->y=Pfinalu0.y-Pfinal.y; deltaU->z=Pfinalu0.z-Pfinal.z; deltaV->x=Pfinalv0.x-Pfinal.x; deltaV->y=Pfinalv0.y-Pfinal.y; deltaV->z=Pfinalv0.z-Pfinal.z; *source=S2; } // #ifndef PROJECTION_HPP // // float maxDistanceCubeXY(Geometry geo, float alpha,int i){ // /////////// // // Compute initial "t" so we access safely as less as out of bounds as possible. // ////////// // // // float maxCubX,maxCubY; // // Forgetting Z, compute mas distance: diagonal+offset // maxCubX=(geo.sVoxelX/2+ abs(geo.offOrigX[i]))/geo.dVoxelX; // maxCubY=(geo.sVoxelY/2+ abs(geo.offOrigY[i]))/geo.dVoxelY; // // return geo.DSO/geo.dVoxelX-sqrt(maxCubX*maxCubX+maxCubY*maxCubY); // // } // #endif
9eca51340513bdd76f98f1a3a775d145dddae19f.cu
/*------------------------------------------------------------------------- * * CUDA functions for texture-memory interpolation based projection * * This file has the necesary functions to perform X-ray parallel projection * operation given a geaometry, angles and image. It uses the 3D texture * memory linear interpolation to uniformily sample a path to integrate the * X-rays. * * CODE by Ander Biguri * --------------------------------------------------------------------------- --------------------------------------------------------------------------- Copyright (c) 2015, University of Bath and CERN- European Organization for Nuclear Research All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------- Contact: [email protected] Codes : https://github.com/CERN/TIGRE --------------------------------------------------------------------------- */ #include <algorithm> #include <cuda_runtime_api.h> #include <cuda.h> #include "ray_interpolated_projection_parallel.hpp" #include "mex.h" #include <math.h> #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ mexPrintf("%s \n",msg);\ mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\ } \ } while (0) // Declare the texture reference. 
texture<float, cudaTextureType3D , cudaReadModeElementType> tex; #define MAXTREADS 1024 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * --->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ __global__ void kernelPixelDetector_parallel( Geometry geo, float* detector, Point3D source , Point3D deltaU, Point3D deltaV, Point3D uvOrigin, float maxdist){ unsigned long y = blockIdx.y * blockDim.y + threadIdx.y; unsigned long x = blockIdx.x * blockDim.x + threadIdx.x; unsigned long idx = x * geo.nDetecV + y; if ((x>= geo.nDetecU) | (y>= geo.nDetecV)) return; /////// Get coordinates XYZ of pixel UV int pixelV = geo.nDetecV-y-1; int pixelU = x; float vectX,vectY,vectZ; Point3D P; P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x); P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y); P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z); Point3D S; S.x=(source.x+pixelU*deltaU.x+pixelV*deltaV.x); S.y=(source.y+pixelU*deltaU.y+pixelV*deltaV.y); S.z=(source.z+pixelU*deltaU.z+pixelV*deltaV.z); // Length is the ray length in normalized space double length=sqrt((S.x-P.x)*(S.x-P.x)+(S.y-P.y)*(S.y-P.y)+(S.z-P.z)*(S.z-P.z)); //now legth is an integer of Nsamples that are required on this line length=ceil(length/geo.accuracy);//Divide the directional vector by an integer vectX=(P.x -S.x)/(length); vectY=(P.y -S.y)/(length); vectZ=(P.z -S.z)/(length); // //Integrate over the line float tx,ty,tz; float sum=0; float i; // limit the amount of mem access after the cube, but before the detector. if ((geo.DSO/geo.dVoxelX+maxdist)/geo.accuracy < length) length=ceil((geo.DSO/geo.dVoxelX+maxdist)/geo.accuracy); //Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel") for (i=floor(maxdist/geo.accuracy); i<=length; i=i+1){ tx=vectX*i+S.x; ty=vectY*i+S.y; tz=vectZ*i+S.z; sum += tex3D(tex, tx+0.5, ty+0.5, tz+0.5); // this line is 94% of time. } float deltalength=sqrt((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+ (vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) ); detector[idx]=sum*deltalength; } int interpolation_projection_parallel(float const * const img, Geometry geo, float** result,float const * const alphas,int nalpha){ // copy data to CUDA memory cudaArray *d_imagedata = 0; const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); cudaMalloc3DArray(&d_imagedata, &channelDesc, extent); cudaCheckErrors("cudaMalloc3D error 3D tex"); cudaMemcpy3DParms copyParams = { 0 }; copyParams.srcPtr = make_cudaPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_imagedata; copyParams.extent = extent; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams); cudaCheckErrors("cudaMemcpy3D fail"); // Configure texture options tex.normalized = false; tex.filterMode = cudaFilterModeLinear; tex.addressMode[0] = cudaAddressModeBorder; tex.addressMode[1] = cudaAddressModeBorder; tex.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex, d_imagedata, channelDesc); cudaCheckErrors("3D texture memory bind fail"); //Done! Image put into texture memory. 
size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float); float* dProjection; cudaMalloc((void**)&dProjection, num_bytes); cudaCheckErrors("cudaMalloc fail"); // If we are going to time bool timekernel=false; cudaEvent_t start, stop; float elapsedTime; if (timekernel){ cudaEventCreate(&start); cudaEventRecord(start,0); } // 16x16 gave the best performance empirically // Funnily that makes it compatible with most GPUs..... dim3 grid(ceil((float)geo.nDetecU/32),ceil((float)geo.nDetecV/32),1); dim3 block(32,32,1); Point3D source, deltaU, deltaV, uvOrigin; float maxdist; for (unsigned int i=0;i<nalpha;i++){ geo.alpha=alphas[i]; //precomute distances for faster execution maxdist=maxDistanceCubeXY(geo,geo.alpha,i); //Precompute per angle constant stuff for speed computeDeltas_parallel(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source); //Interpolation!! kernelPixelDetector_parallel<<<grid,block>>>(geo,dProjection, source, deltaU, deltaV, uvOrigin,floor(maxdist)); cudaCheckErrors("Kernel fail"); // copy result to host cudaMemcpy(result[i], dProjection, num_bytes, cudaMemcpyDeviceToHost); cudaCheckErrors("cudaMemcpy fail"); } if (timekernel){ cudaEventCreate(&stop); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start,stop); mexPrintf("%f\n" ,elapsedTime); } cudaUnbindTexture(tex); cudaCheckErrors("Unbind fail"); cudaFree(dProjection); cudaFreeArray(d_imagedata); cudaCheckErrors("cudaFree d_imagedata fail"); cudaDeviceReset(); return 0; } /* This code precomputes The location of the source and the Delta U and delta V (in the warped space) * to compute the locations of the x-rays. While it seems verbose and overly-optimized, * it does saves about 30% of each of the kernel calls. Thats something! **/ void computeDeltas_parallel(Geometry geo, float alpha,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){ Point3D S; S.x=geo.DSO; S.y=geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); S.z=geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); //End point Point3D P,Pu0,Pv0; P.x =-(geo.DSD-geo.DSO); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pu0.x=-(geo.DSD-geo.DSO); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pv0.x=-(geo.DSD-geo.DSO); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1); // Geomtric trasnformations: //1: Offset detector //P.x P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i]; Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i]; Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i]; //S doesnt need to chagne //3: Rotate (around z)! 
Point3D Pfinal, Pfinalu0, Pfinalv0; Pfinal.x =P.x*cos(geo.alpha)-P.y*sin(geo.alpha); Pfinal.y =P.y*cos(geo.alpha)+P.x*sin(geo.alpha); Pfinal.z =P.z; Pfinalu0.x=Pu0.x*cos(geo.alpha)-Pu0.y*sin(geo.alpha); Pfinalu0.y=Pu0.y*cos(geo.alpha)+Pu0.x*sin(geo.alpha); Pfinalu0.z=Pu0.z; Pfinalv0.x=Pv0.x*cos(geo.alpha)-Pv0.y*sin(geo.alpha); Pfinalv0.y=Pv0.y*cos(geo.alpha)+Pv0.x*sin(geo.alpha); Pfinalv0.z=Pv0.z; Point3D S2; S2.x=S.x*cos(geo.alpha)-S.y*sin(geo.alpha); S2.y=S.y*cos(geo.alpha)+S.x*sin(geo.alpha); S2.z=S.z; //2: Offset image (instead of offseting image, -offset everything else) Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i]; Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i]; Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i]; S2.x=S2.x-geo.offOrigX[i]; S2.y=S2.y-geo.offOrigY[i]; S2.z=S2.z-geo.offOrigZ[i]; // As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation); Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2; Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2; Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2; S2.x =S2.x+geo.sVoxelX/2-geo.dVoxelX/2; S2.y =S2.y+geo.sVoxelY/2-geo.dVoxelY/2; S2.z =S2.z +geo.sVoxelZ/2-geo.dVoxelZ/2; //4. Scale everything so dVoxel==1 Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ; Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ; Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ; S2.x =S2.x/geo.dVoxelX; S2.y =S2.y/geo.dVoxelY; S2.z =S2.z/geo.dVoxelZ; //5. apply COR. Wherever everything was, now its offesetd by a bit float CORx, CORy; CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX; CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY; Pfinal.x+=CORx; Pfinal.y+=CORy; Pfinalu0.x+=CORx; Pfinalu0.y+=CORy; Pfinalv0.x+=CORx; Pfinalv0.y+=CORy; S2.x+=CORx; S2.y+=CORy; // return *uvorigin=Pfinal; deltaU->x=Pfinalu0.x-Pfinal.x; deltaU->y=Pfinalu0.y-Pfinal.y; deltaU->z=Pfinalu0.z-Pfinal.z; deltaV->x=Pfinalv0.x-Pfinal.x; deltaV->y=Pfinalv0.y-Pfinal.y; deltaV->z=Pfinalv0.z-Pfinal.z; *source=S2; } // #ifndef PROJECTION_HPP // // float maxDistanceCubeXY(Geometry geo, float alpha,int i){ // /////////// // // Compute initial "t" so we access safely as less as out of bounds as possible. // ////////// // // // float maxCubX,maxCubY; // // Forgetting Z, compute mas distance: diagonal+offset // maxCubX=(geo.sVoxelX/2+ abs(geo.offOrigX[i]))/geo.dVoxelX; // maxCubY=(geo.sVoxelY/2+ abs(geo.offOrigY[i]))/geo.dVoxelY; // // return geo.DSO/geo.dVoxelX-sqrt(maxCubX*maxCubX+maxCubY*maxCubY); // // } // #endif
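The .hip and .cu versions of this projector differ essentially in the runtime prefix (hip* versus cuda*) and in how kernels are launched. The sketch below shows that mapping on a deliberately tiny, invented kernel (scale_by); it assumes hipcc defines __HIPCC__ when building the HIP path, and it is an illustration rather than code from either file.

#ifdef __HIPCC__
#include <hip/hip_runtime.h>
#endif

// Hypothetical kernel used only to illustrate the launch-syntax mapping.
__global__ void scale_by(float f, float *v, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= f;
}

void launch_scale(float *d_v, int n, float f)
{
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
#ifdef __HIPCC__
    // Form used in the .hip file: grid, block, shared-memory size and
    // stream become explicit macro arguments.
    hipLaunchKernelGGL(scale_by, grid, block, 0, 0, f, d_v, n);
#else
    // Form used in the .cu file: triple-chevron launch configuration.
    scale_by<<<grid, block>>>(f, d_v, n);
#endif
}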
ae8cb7300791e4276f189fb98961c0bd9baf303f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" constexpr int num_threads = 256; template <typename T> __global__ void chamfer_distance_kernel( const T* input, const T* target, int batch_size, int n, int m, int channels, int64_t* __restrict__ index, T* __restrict__ sqdist) { __shared__ T smem[num_threads * 3]; int b = blockIdx.x; input += b * n * 3; target += b * m * 3; index += b * n; sqdist += b * n; int tid = threadIdx.x; int base = 0; for (int i = tid; i < m; i += num_threads) { smem[tid * 3 + 0] = target[i * 3 + 0]; smem[tid * 3 + 1] = target[i * 3 + 1]; smem[tid * 3 + 2] = target[i * 3 + 2]; __syncthreads(); int kmax = min(m, base + num_threads) - base; for (int j = 0; j < n; ++j) { T x1 = input[j * 3 + 0]; T y1 = input[j * 3 + 1]; T z1 = input[j * 3 + 2]; T minval = sqdist[j]; int64_t argmin = index[j]; for (int k = 0; k < kmax; ++k) { T x2 = smem[k * 3 + 0] - x1; T y2 = smem[k * 3 + 1] - y1; T z2 = smem[k * 3 + 2] - z1; T dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < minval) { minval = dist; argmin = base + k; } } if (minval < sqdist[j]) { sqdist[j] = minval; index[j] = argmin; } } base += num_threads; __syncthreads(); } } std::vector<at::Tensor> chamfer_distance_cuda( const at::Tensor& input, const at::Tensor& target) { int batch_size = input.size(0); int n = input.size(1); int m = target.size(1); int channels = input.size(2); at::Tensor index1 = at::zeros({batch_size, n}, input.options().dtype(at::kLong)); at::Tensor index2 = at::zeros({batch_size, m}, input.options().dtype(at::kLong)); at::Tensor sqdist1 = at::zeros({batch_size, n}, input.options()).fill_(1e10); at::Tensor sqdist2 = at::zeros({batch_size, m}, input.options()).fill_(1e10); AT_DISPATCH_FLOATING_TYPES(input.type(), "chamfer_distance_cuda", [&] { dim3 block(num_threads); dim3 grid(batch_size); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( chamfer_distance_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream, input.contiguous().data<scalar_t>(), target.contiguous().data<scalar_t>(), batch_size, n, m, channels, index1.data<int64_t>(), sqdist1.data<scalar_t>()); hipLaunchKernelGGL(( chamfer_distance_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream, target.contiguous().data<scalar_t>(), input.contiguous().data<scalar_t>(), batch_size, m, n, channels, index2.data<int64_t>(), sqdist2.data<scalar_t>()); }); return {index1, index2, sqdist1, sqdist2}; } template <typename T> __global__ void chamfer_distance_grad_kernel( const T* __restrict__ grad, const T* input, const T* target, const int64_t* __restrict__ index, int batch_size, int n, int m, int channels, T* __restrict__ output1, T* __restrict__ output2) { int b = blockIdx.x; grad += b * n; input += b * n * 3; target += b * m * 3; index += b * n; output1 += b * n * 3; output2 += b * m * 3; for (int i = 0; i < n; i += num_threads) { T x1 = input[i + 0]; T y1 = input[i + 1]; T z1 = input[i + 2]; int64_t j = index[i]; T x2 = target[j + 0]; T y2 = target[j + 1]; T z2 = target[j + 2]; T g = grad[i] * 2; atomicAdd(output1 + i * 3 + 0, g * (x1 - x2)); atomicAdd(output1 + i * 3 + 1, g * (y1 - y2)); atomicAdd(output1 + i * 3 + 2, g * (z1 - z2)); atomicAdd(output2 + j * 3 + 0, g * (x2 - x1)); atomicAdd(output2 + j * 3 + 1, g * (y2 - y1)); atomicAdd(output2 + j * 3 + 2, g * (z2 - z1)); } } std::vector<at::Tensor> chamfer_distance_grad_cuda( const at::Tensor& grad1, const at::Tensor& grad2, const at::Tensor& input, const at::Tensor& target, const at::Tensor& index1, const at::Tensor& index2) { 
int batch_size = input.size(0); int n = input.size(1); int m = target.size(1); int channels = input.size(2); at::Tensor output1 = at::zeros_like(input); at::Tensor output2 = at::zeros_like(target); AT_DISPATCH_FLOATING_TYPES(input.type(), "chamfer_distance_grad_cuda", [&] { dim3 block(num_threads); dim3 grid(batch_size); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( chamfer_distance_grad_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream, grad1.contiguous().data<scalar_t>(), input.contiguous().data<scalar_t>(), target.contiguous().data<scalar_t>(), index1.data<int64_t>(), batch_size, n, m, channels, output1.data<scalar_t>(), output2.data<scalar_t>()); hipLaunchKernelGGL(( chamfer_distance_grad_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream, grad2.contiguous().data<scalar_t>(), target.contiguous().data<scalar_t>(), input.contiguous().data<scalar_t>(), index2.data<int64_t>(), batch_size, m, n, channels, output2.data<scalar_t>(), output1.data<scalar_t>()); }); return {output1, output2}; }
ae8cb7300791e4276f189fb98961c0bd9baf303f.cu
#include "cuda.h" constexpr int num_threads = 256; template <typename T> __global__ void chamfer_distance_kernel( const T* input, const T* target, int batch_size, int n, int m, int channels, int64_t* __restrict__ index, T* __restrict__ sqdist) { __shared__ T smem[num_threads * 3]; int b = blockIdx.x; input += b * n * 3; target += b * m * 3; index += b * n; sqdist += b * n; int tid = threadIdx.x; int base = 0; for (int i = tid; i < m; i += num_threads) { smem[tid * 3 + 0] = target[i * 3 + 0]; smem[tid * 3 + 1] = target[i * 3 + 1]; smem[tid * 3 + 2] = target[i * 3 + 2]; __syncthreads(); int kmax = min(m, base + num_threads) - base; for (int j = 0; j < n; ++j) { T x1 = input[j * 3 + 0]; T y1 = input[j * 3 + 1]; T z1 = input[j * 3 + 2]; T minval = sqdist[j]; int64_t argmin = index[j]; for (int k = 0; k < kmax; ++k) { T x2 = smem[k * 3 + 0] - x1; T y2 = smem[k * 3 + 1] - y1; T z2 = smem[k * 3 + 2] - z1; T dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < minval) { minval = dist; argmin = base + k; } } if (minval < sqdist[j]) { sqdist[j] = minval; index[j] = argmin; } } base += num_threads; __syncthreads(); } } std::vector<at::Tensor> chamfer_distance_cuda( const at::Tensor& input, const at::Tensor& target) { int batch_size = input.size(0); int n = input.size(1); int m = target.size(1); int channels = input.size(2); at::Tensor index1 = at::zeros({batch_size, n}, input.options().dtype(at::kLong)); at::Tensor index2 = at::zeros({batch_size, m}, input.options().dtype(at::kLong)); at::Tensor sqdist1 = at::zeros({batch_size, n}, input.options()).fill_(1e10); at::Tensor sqdist2 = at::zeros({batch_size, m}, input.options()).fill_(1e10); AT_DISPATCH_FLOATING_TYPES(input.type(), "chamfer_distance_cuda", [&] { dim3 block(num_threads); dim3 grid(batch_size); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); chamfer_distance_kernel<scalar_t><<<grid, block, 0, stream>>>( input.contiguous().data<scalar_t>(), target.contiguous().data<scalar_t>(), batch_size, n, m, channels, index1.data<int64_t>(), sqdist1.data<scalar_t>()); chamfer_distance_kernel<scalar_t><<<grid, block, 0, stream>>>( target.contiguous().data<scalar_t>(), input.contiguous().data<scalar_t>(), batch_size, m, n, channels, index2.data<int64_t>(), sqdist2.data<scalar_t>()); }); return {index1, index2, sqdist1, sqdist2}; } template <typename T> __global__ void chamfer_distance_grad_kernel( const T* __restrict__ grad, const T* input, const T* target, const int64_t* __restrict__ index, int batch_size, int n, int m, int channels, T* __restrict__ output1, T* __restrict__ output2) { int b = blockIdx.x; grad += b * n; input += b * n * 3; target += b * m * 3; index += b * n; output1 += b * n * 3; output2 += b * m * 3; for (int i = 0; i < n; i += num_threads) { T x1 = input[i + 0]; T y1 = input[i + 1]; T z1 = input[i + 2]; int64_t j = index[i]; T x2 = target[j + 0]; T y2 = target[j + 1]; T z2 = target[j + 2]; T g = grad[i] * 2; atomicAdd(output1 + i * 3 + 0, g * (x1 - x2)); atomicAdd(output1 + i * 3 + 1, g * (y1 - y2)); atomicAdd(output1 + i * 3 + 2, g * (z1 - z2)); atomicAdd(output2 + j * 3 + 0, g * (x2 - x1)); atomicAdd(output2 + j * 3 + 1, g * (y2 - y1)); atomicAdd(output2 + j * 3 + 2, g * (z2 - z1)); } } std::vector<at::Tensor> chamfer_distance_grad_cuda( const at::Tensor& grad1, const at::Tensor& grad2, const at::Tensor& input, const at::Tensor& target, const at::Tensor& index1, const at::Tensor& index2) { int batch_size = input.size(0); int n = input.size(1); int m = target.size(1); int channels = input.size(2); at::Tensor output1 = 
at::zeros_like(input); at::Tensor output2 = at::zeros_like(target); AT_DISPATCH_FLOATING_TYPES(input.type(), "chamfer_distance_grad_cuda", [&] { dim3 block(num_threads); dim3 grid(batch_size); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); chamfer_distance_grad_kernel<scalar_t><<<grid, block, 0, stream>>>( grad1.contiguous().data<scalar_t>(), input.contiguous().data<scalar_t>(), target.contiguous().data<scalar_t>(), index1.data<int64_t>(), batch_size, n, m, channels, output1.data<scalar_t>(), output2.data<scalar_t>()); chamfer_distance_grad_kernel<scalar_t><<<grid, block, 0, stream>>>( grad2.contiguous().data<scalar_t>(), target.contiguous().data<scalar_t>(), input.contiguous().data<scalar_t>(), index2.data<int64_t>(), batch_size, m, n, channels, output2.data<scalar_t>(), output1.data<scalar_t>()); }); return {output1, output2}; }
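Both chamfer files use at::Tensor, AT_DISPATCH_FLOATING_TYPES and the current ATen stream, yet open with only the bare runtime header, so they presumably rely on a surrounding PyTorch extension build. The fragment below is a hedged guess at what that surrounding translation unit could add; the module name and docstrings are invented, and only chamfer_distance_cuda and chamfer_distance_grad_cuda come from the files above.

// Headers this translation unit would typically need when built as a
// PyTorch C++/CUDA extension (an assumption about the surrounding build,
// not something stated in the files above).
#include <torch/extension.h>        // at::Tensor, AT_DISPATCH_FLOATING_TYPES
#include <ATen/cuda/CUDAContext.h>  // at::cuda::getCurrentCUDAStream()
#include <vector>

// Hypothetical module registration for the two entry points defined above.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("chamfer_distance", &chamfer_distance_cuda,
        "nearest-neighbour indices and squared distances (CUDA)");
  m.def("chamfer_distance_grad", &chamfer_distance_grad_cuda,
        "gradients w.r.t. both point clouds (CUDA)");
}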
cfe58528b4831db75d6d133501a7c9a8b7588eb7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (C) by Argonne National Laboratory * See COPYRIGHT in top-level directory */ #include <mpi.h> #include <stdio.h> #include <assert.h> const int N = 1000000; const int a = 2.0; const float x_val = 1.0f; const float y_val = 2.0f; const float exp_result = 4.0f; static int check_result(float *y) { float maxError = 0.0f; int errs = 0; for (int i = 0; i < N; i++) { if (abs(y[i] - exp_result) > 0.01) { errs++; maxError = max(maxError, abs(y[i] - exp_result)); } } if (errs > 0) { printf("%d errors, Max error: %f\n", errs, maxError); } return errs; } __global__ void saxpy(int n, float a, float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) y[i] = a*x[i] + y[i]; } int main(void) { int errs = 0; hipStream_t stream; hipStreamCreate(&stream); int mpi_errno; int rank, size; MPI_Init(NULL, NULL); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); if (size < 2) { printf("This test require 2 processes\n"); exit(1); } float *x, *y, *d_x, *d_y; x = (float*)malloc(N*sizeof(float)); y = (float*)malloc(N*sizeof(float)); hipMalloc(&d_x, N*sizeof(float)); hipMalloc(&d_y, N*sizeof(float)); MPI_Info info; MPI_Info_create(&info); MPI_Info_set(info, "type", "hipStream_t"); MPIX_Info_set_hex(info, "value", &stream, sizeof(stream)); MPIX_Stream mpi_stream; MPIX_Stream_create(info, &mpi_stream); MPI_Info_free(&info); MPI_Comm stream_comm; MPIX_Stream_comm_create(MPI_COMM_WORLD, mpi_stream, &stream_comm); /* Rank 0 sends x data to Rank 1, Rank 1 performs a * x + y and checks result */ if (rank == 0) { for (int i = 0; i < N; i++) { x[i] = x_val; } hipMemcpyAsync(d_x, x, N*sizeof(float), hipMemcpyHostToDevice, stream); mpi_errno = MPIX_Send_enqueue(d_x, N, MPI_FLOAT, 1, 0, stream_comm); assert(mpi_errno == MPI_SUCCESS); hipStreamSynchronize(stream); } else if (rank == 1) { for (int i = 0; i < N; i++) { y[i] = y_val; } hipMemcpyAsync(d_y, y, N*sizeof(float), hipMemcpyHostToDevice, stream); mpi_errno = MPIX_Recv_enqueue(d_x, N, MPI_FLOAT, 0, 0, stream_comm, MPI_STATUS_IGNORE); assert(mpi_errno == MPI_SUCCESS); hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, stream, N, a, d_x, d_y); hipMemcpyAsync(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost, stream); hipStreamSynchronize(stream); errs += check_result(y); } /* Test again with MPIX_Isend_enqueue and MPIX_Wait_enqueue */ if (rank == 0) { for (int i = 0; i < N; i++) { x[i] = x_val; } /* we are directly sending from x in this test */ MPI_Request req; mpi_errno = MPIX_Isend_enqueue(x, N, MPI_FLOAT, 1, 0, stream_comm, &req); assert(mpi_errno == MPI_SUCCESS); /* req won't reset to MPI_REQUEST_NULL, but user shouldn't use it afterward */ mpi_errno = MPIX_Wait_enqueue(&req, MPI_STATUS_IGNORE); assert(mpi_errno == MPI_SUCCESS); hipStreamSynchronize(stream); } else if (rank == 1) { /* reset d_x, d_y */ for (int i = 0; i < N; i++) { x[i] = 0.0; y[i] = y_val; } hipMemcpyAsync(d_x, x, N*sizeof(float), hipMemcpyHostToDevice, stream); hipMemcpyAsync(d_y, y, N*sizeof(float), hipMemcpyHostToDevice, stream); MPI_Request req; mpi_errno = MPIX_Irecv_enqueue(d_x, N, MPI_FLOAT, 0, 0, stream_comm, &req); assert(mpi_errno == MPI_SUCCESS); mpi_errno = MPIX_Waitall_enqueue(1, &req, MPI_STATUSES_IGNORE); assert(mpi_errno == MPI_SUCCESS); hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, stream, N, a, d_x, d_y); hipMemcpyAsync(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost, stream); hipStreamSynchronize(stream); errs += 
check_result(y); } MPI_Comm_free(&stream_comm); MPIX_Stream_free(&mpi_stream); hipFree(d_x); hipFree(d_y); free(x); free(y); hipStreamDestroy(stream); MPI_Finalize(); if (rank == 1 && errs == 0) { printf("No Errors\n"); } return errs; }
cfe58528b4831db75d6d133501a7c9a8b7588eb7.cu
/* * Copyright (C) by Argonne National Laboratory * See COPYRIGHT in top-level directory */ #include <mpi.h> #include <stdio.h> #include <assert.h> const int N = 1000000; const int a = 2.0; const float x_val = 1.0f; const float y_val = 2.0f; const float exp_result = 4.0f; static int check_result(float *y) { float maxError = 0.0f; int errs = 0; for (int i = 0; i < N; i++) { if (abs(y[i] - exp_result) > 0.01) { errs++; maxError = max(maxError, abs(y[i] - exp_result)); } } if (errs > 0) { printf("%d errors, Max error: %f\n", errs, maxError); } return errs; } __global__ void saxpy(int n, float a, float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) y[i] = a*x[i] + y[i]; } int main(void) { int errs = 0; cudaStream_t stream; cudaStreamCreate(&stream); int mpi_errno; int rank, size; MPI_Init(NULL, NULL); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); if (size < 2) { printf("This test require 2 processes\n"); exit(1); } float *x, *y, *d_x, *d_y; x = (float*)malloc(N*sizeof(float)); y = (float*)malloc(N*sizeof(float)); cudaMalloc(&d_x, N*sizeof(float)); cudaMalloc(&d_y, N*sizeof(float)); MPI_Info info; MPI_Info_create(&info); MPI_Info_set(info, "type", "cudaStream_t"); MPIX_Info_set_hex(info, "value", &stream, sizeof(stream)); MPIX_Stream mpi_stream; MPIX_Stream_create(info, &mpi_stream); MPI_Info_free(&info); MPI_Comm stream_comm; MPIX_Stream_comm_create(MPI_COMM_WORLD, mpi_stream, &stream_comm); /* Rank 0 sends x data to Rank 1, Rank 1 performs a * x + y and checks result */ if (rank == 0) { for (int i = 0; i < N; i++) { x[i] = x_val; } cudaMemcpyAsync(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice, stream); mpi_errno = MPIX_Send_enqueue(d_x, N, MPI_FLOAT, 1, 0, stream_comm); assert(mpi_errno == MPI_SUCCESS); cudaStreamSynchronize(stream); } else if (rank == 1) { for (int i = 0; i < N; i++) { y[i] = y_val; } cudaMemcpyAsync(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice, stream); mpi_errno = MPIX_Recv_enqueue(d_x, N, MPI_FLOAT, 0, 0, stream_comm, MPI_STATUS_IGNORE); assert(mpi_errno == MPI_SUCCESS); saxpy<<<(N+255)/256, 256, 0, stream>>>(N, a, d_x, d_y); cudaMemcpyAsync(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost, stream); cudaStreamSynchronize(stream); errs += check_result(y); } /* Test again with MPIX_Isend_enqueue and MPIX_Wait_enqueue */ if (rank == 0) { for (int i = 0; i < N; i++) { x[i] = x_val; } /* we are directly sending from x in this test */ MPI_Request req; mpi_errno = MPIX_Isend_enqueue(x, N, MPI_FLOAT, 1, 0, stream_comm, &req); assert(mpi_errno == MPI_SUCCESS); /* req won't reset to MPI_REQUEST_NULL, but user shouldn't use it afterward */ mpi_errno = MPIX_Wait_enqueue(&req, MPI_STATUS_IGNORE); assert(mpi_errno == MPI_SUCCESS); cudaStreamSynchronize(stream); } else if (rank == 1) { /* reset d_x, d_y */ for (int i = 0; i < N; i++) { x[i] = 0.0; y[i] = y_val; } cudaMemcpyAsync(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice, stream); MPI_Request req; mpi_errno = MPIX_Irecv_enqueue(d_x, N, MPI_FLOAT, 0, 0, stream_comm, &req); assert(mpi_errno == MPI_SUCCESS); mpi_errno = MPIX_Waitall_enqueue(1, &req, MPI_STATUSES_IGNORE); assert(mpi_errno == MPI_SUCCESS); saxpy<<<(N+255)/256, 256, 0, stream>>>(N, a, d_x, d_y); cudaMemcpyAsync(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost, stream); cudaStreamSynchronize(stream); errs += check_result(y); } MPI_Comm_free(&stream_comm); MPIX_Stream_free(&mpi_stream); cudaFree(d_x); cudaFree(d_y); free(x); free(y); 
cudaStreamDestroy(stream); MPI_Finalize(); if (rank == 1 && errs == 0) { printf("No Errors\n"); } return errs; }
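The MPI test above wires an existing CUDA stream into an MPIX stream communicator through an MPI_Info object before any of the *_enqueue calls are issued. The helper below condenses those setup steps into one function; every MPI/MPIX call in it appears verbatim in the test, but the helper itself (make_stream_comm) is a sketch, not part of the original file.

#include <mpi.h>
#include <cuda_runtime.h>

// Wrap an existing CUDA stream in an MPIX stream communicator, following
// the same sequence of calls as the test above. The created MPIX_Stream is
// returned through mpi_stream_out so the caller can free it later with
// MPIX_Stream_free, as the test does.
static MPI_Comm make_stream_comm(MPI_Comm parent, cudaStream_t stream,
                                 MPIX_Stream *mpi_stream_out)
{
    MPI_Info info;
    MPI_Info_create(&info);
    MPI_Info_set(info, "type", "cudaStream_t");
    MPIX_Info_set_hex(info, "value", &stream, sizeof(stream));

    MPIX_Stream_create(info, mpi_stream_out);
    MPI_Info_free(&info);

    MPI_Comm stream_comm;
    MPIX_Stream_comm_create(parent, *mpi_stream_out, &stream_comm);
    return stream_comm;
}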
74d77ac56fa04bc3662167630b1454db8f63fbd8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // relu.cpp #include <algorithm> #include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/relu.hpp> #include <nbla/variable.hpp> namespace nbla { template <typename T> __global__ void kernel_relu_forward(const int num, T *y, const T *x) { NBLA_CUDA_KERNEL_LOOP(idx, num) { y[idx] = max(T(0), x[idx]); } } template <typename T, bool accum = true> __global__ void kernel_relu_backward(const int num, T *dx, const T *x, const T *dy) { NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx] = (accum ? dx[idx] : 0) + (x[idx] > 0 ? dy[idx] : 0); } } template <class T> void ReLUCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(std::stoi(this->ctx_.device_id)); const T *x = inputs[0]->get_data_pointer<T>(this->ctx_); T *y = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_); size_t size = inputs[0]->size(); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_relu_forward, size, y, x); } template <class T> void ReLUCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!propagate_down[0]) { return; } cuda_set_device(std::stoi(this->ctx_.device_id)); const T *x = inputs[0]->get_data_pointer<T>(this->ctx_); T *dx = inputs[0]->cast_grad_and_get_pointer<T>(this->ctx_); const T *dy = outputs[0]->get_grad_pointer<T>(this->ctx_); size_t size = inputs[0]->size(); if (dx != dy && accum[0]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_relu_backward<T, true>), size, dx, x, dy); } else { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_relu_backward<T, false>), size, dx, x, dy); } } // Template instantiation template class ReLUCuda<float>; }
74d77ac56fa04bc3662167630b1454db8f63fbd8.cu
// Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // relu.cpp #include <algorithm> #include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/relu.hpp> #include <nbla/variable.hpp> namespace nbla { template <typename T> __global__ void kernel_relu_forward(const int num, T *y, const T *x) { NBLA_CUDA_KERNEL_LOOP(idx, num) { y[idx] = max(T(0), x[idx]); } } template <typename T, bool accum = true> __global__ void kernel_relu_backward(const int num, T *dx, const T *x, const T *dy) { NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx] = (accum ? dx[idx] : 0) + (x[idx] > 0 ? dy[idx] : 0); } } template <class T> void ReLUCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(std::stoi(this->ctx_.device_id)); const T *x = inputs[0]->get_data_pointer<T>(this->ctx_); T *y = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_); size_t size = inputs[0]->size(); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_relu_forward, size, y, x); } template <class T> void ReLUCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!propagate_down[0]) { return; } cuda_set_device(std::stoi(this->ctx_.device_id)); const T *x = inputs[0]->get_data_pointer<T>(this->ctx_); T *dx = inputs[0]->cast_grad_and_get_pointer<T>(this->ctx_); const T *dy = outputs[0]->get_grad_pointer<T>(this->ctx_); size_t size = inputs[0]->size(); if (dx != dy && accum[0]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_relu_backward<T, true>), size, dx, x, dy); } else { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_relu_backward<T, false>), size, dx, x, dy); } } // Template instantiation template class ReLUCuda<float>; }
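/*
 * A self-contained sketch of the same ReLU forward computation without the nbla
 * framework, assuming a plain grid-stride loop in place of NBLA_CUDA_KERNEL_LOOP and a
 * hand-written launch in place of NBLA_CUDA_LAUNCH_KERNEL_SIMPLE. The kernel and
 * launcher names are illustrative only.
 */
#include <cuda_runtime.h>

__global__ void relu_forward_ref(int n, float *y, const float *x) {
  // grid-stride loop: each thread covers idx, idx + gridDim.x*blockDim.x, ...
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n;
       idx += gridDim.x * blockDim.x) {
    y[idx] = x[idx] > 0.0f ? x[idx] : 0.0f;
  }
}

void launch_relu_forward_ref(int n, float *d_y, const float *d_x,
                             cudaStream_t stream) {
  const int block = 256;
  const int grid = (n + block - 1) / block;  // enough blocks to cover n elements once
  relu_forward_ref<<<grid, block, 0, stream>>>(n, d_y, d_x);
}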
87b716372ff37af24d688257e628c7846a105ac3.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>

#define SIM_THREADS 10 // how many simultaneous threads
#define N 20           // number of variables in a vector

#pragma options align=mac68k

// this function returns a result
__global__ void cudaFunct(float *pArgument, float *pResult)
{
    int i;

    // this loop will do sequences:
    // i = 0, 10, 20, ...
    // i = 1, 11, 21, ...
    // i = 2, 12, 22, ...
    // ...
    // i = 9, 19, 29, ...
    //
    // assuming SIM_THREADS = 10
    for (i = threadIdx.x;  // start from i = thread ID
         i < N;            // stop if all i's are done
         i += SIM_THREADS) // skip number of threads
        // guard the first element so we never read pArgument[-1]
        pResult[i] = (i > 0) ? (pArgument[i] - pArgument[i - 1]) : pArgument[i];
}

int main(void)
{
    float *pHostArgument;
    float *pCudaArgument = 0;
    float *pHostResult;
    float *pCudaResult = 0;
    int i;
    int cudaStat = 0; // accumulate status codes as int (hipError_t values cannot be summed directly)

    // reserve memory in host system
    pHostArgument = (float *)malloc(N * sizeof(pHostArgument[0]));
    pHostResult = (float *)malloc(N * sizeof(pHostResult[0]));

    // reserve memory in cuda
    cudaStat += hipMalloc((void **)&pCudaArgument, N * sizeof(pCudaResult[0]));
    cudaStat += hipMalloc((void **)&pCudaResult, N * sizeof(pCudaResult[0]));

    // initialize argument
    for (i = 0; i < N; i++)
        pHostArgument[i] = float(i);

    // copy argument from host to cuda
    cudaStat += hipMemcpy(
        pCudaArgument,              // destination
        pHostArgument,              // source
        N * sizeof(pCudaResult[0]), // amount to copy
        hipMemcpyHostToDevice);     // type: host -> device

    // execute in cuda
    hipLaunchKernelGGL(cudaFunct, dim3(1), dim3(SIM_THREADS), 0, 0, pCudaArgument, pCudaResult);

    // copy result from cuda to host
    cudaStat += hipMemcpy(
        pHostResult,                // destination
        pCudaResult,                // source
        N * sizeof(pCudaResult[0]), // amount to copy
        hipMemcpyDeviceToHost);     // type: device -> host

    for (i = 0; i < N; i++)
        printf("%f\n", pHostResult[i]);

    if (cudaStat)
    {
        printf("\n Problem in memory management.\n");
    }
    else
    {
        printf("\n Memory operations passed.\n");
    }

    // release device and host memory
    hipFree(pCudaArgument);
    hipFree(pCudaResult);
    free(pHostArgument);
    free(pHostResult);

    return cudaStat ? 1 : 0;
}
87b716372ff37af24d688257e628c7846a105ac3.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

#define SIM_THREADS 10 // how many simultaneous threads
#define N 20           // number of variables in a vector

#pragma options align=mac68k

// this function returns a result
__global__ void cudaFunct(float *pArgument, float *pResult)
{
    int i;

    // this loop will do sequences:
    // i = 0, 10, 20, ...
    // i = 1, 11, 21, ...
    // i = 2, 12, 22, ...
    // ...
    // i = 9, 19, 29, ...
    //
    // assuming SIM_THREADS = 10
    for (i = threadIdx.x;  // start from i = thread ID
         i < N;            // stop if all i's are done
         i += SIM_THREADS) // skip number of threads
        // guard the first element so we never read pArgument[-1]
        pResult[i] = (i > 0) ? (pArgument[i] - pArgument[i - 1]) : pArgument[i];
}

int main(void)
{
    float *pHostArgument;
    float *pCudaArgument = 0;
    float *pHostResult;
    float *pCudaResult = 0;
    int i;
    int cudaStat = 0; // accumulate status codes as int (cudaError_t values cannot be summed directly)

    // reserve memory in host system
    pHostArgument = (float *)malloc(N * sizeof(pHostArgument[0]));
    pHostResult = (float *)malloc(N * sizeof(pHostResult[0]));

    // reserve memory in cuda
    cudaStat += cudaMalloc((void **)&pCudaArgument, N * sizeof(pCudaResult[0]));
    cudaStat += cudaMalloc((void **)&pCudaResult, N * sizeof(pCudaResult[0]));

    // initialize argument
    for (i = 0; i < N; i++)
        pHostArgument[i] = float(i);

    // copy argument from host to cuda
    cudaStat += cudaMemcpy(
        pCudaArgument,              // destination
        pHostArgument,              // source
        N * sizeof(pCudaResult[0]), // amount to copy
        cudaMemcpyHostToDevice);    // type: host -> device

    // execute in cuda
    cudaFunct<<<1, SIM_THREADS>>>(pCudaArgument, pCudaResult);

    // copy result from cuda to host
    cudaStat += cudaMemcpy(
        pHostResult,                // destination
        pCudaResult,                // source
        N * sizeof(pCudaResult[0]), // amount to copy
        cudaMemcpyDeviceToHost);    // type: device -> host

    for (i = 0; i < N; i++)
        printf("%f\n", pHostResult[i]);

    if (cudaStat)
    {
        printf("\n Problem in memory management.\n");
    }
    else
    {
        printf("\n Memory operations passed.\n");
    }

    // release device and host memory
    cudaFree(pCudaArgument);
    cudaFree(pCudaResult);
    free(pHostArgument);
    free(pHostResult);

    return cudaStat ? 1 : 0;
}
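/*
 * Rather than summing status codes as the example above does, a common alternative is to
 * check every runtime call at its call site and report the failing file/line. This is a
 * minimal sketch; the macro name CUDA_CHECK_CALL is illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CUDA_CHECK_CALL(call)                                       \
    do {                                                            \
        cudaError_t err__ = (call);                                 \
        if (err__ != cudaSuccess) {                                 \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,      \
                    cudaGetErrorString(err__));                     \
            exit(EXIT_FAILURE);                                     \
        }                                                           \
    } while (0)

/* usage sketch:
 *   CUDA_CHECK_CALL(cudaMalloc((void **)&pCudaArgument, N * sizeof(float)));
 *   CUDA_CHECK_CALL(cudaMemcpy(pCudaArgument, pHostArgument,
 *                              N * sizeof(float), cudaMemcpyHostToDevice));
 */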
cc0dd78ae0431a87f1b0b6028df591c8f42c1422.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define N 2048 #define BLOCK_COLUMNS 32 #define BLOCK_ROWS 32 __global__ void transpose_naive(float *dev_out, const float *dev_in) { int x = blockIdx.x * BLOCK_COLUMNS + threadIdx.x; int y = blockIdx.y * BLOCK_ROWS + threadIdx.y; dev_out[x*N + y] = dev_in[y*N + x]; } int main(){ float * host_in, * host_out, * host_test; float * dev_in, * dev_out; int size = N*N; int mem_syze = size * sizeof(float); int i; // timing hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); //allocate memory in host and device host_in = (float *)malloc(mem_syze); host_out = (float *)malloc(mem_syze); host_test = (float *)malloc(mem_syze); hipMalloc((void**)&dev_in, mem_syze); hipMalloc((void**)&dev_out, mem_syze); //fill matrix in host for(i = 0; i<size; ++i){ host_in[i] = i; host_test[i] = 0; } //transfer matrix from host to device hipMemcpy(dev_in, host_in, mem_syze, hipMemcpyHostToDevice); //transpose matrix in device dim3 dimGrid(N/BLOCK_COLUMNS, N/BLOCK_ROWS, 1); dim3 dimBlock(BLOCK_COLUMNS, BLOCK_ROWS, 1); hipEventRecord(start); hipLaunchKernelGGL(( transpose_naive), dim3(dimGrid), dim3(dimBlock) , 0, 0, dev_out, dev_in); hipEventRecord(stop); // transfer matrix from device to host hipMemcpy(host_out, dev_out, mem_syze, hipMemcpyDeviceToHost); // correctness test //printf("\ncorrecteness: %d \n", correct(host_in, host_out)); //showing BandN hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); for (int j = 0; j < N; j++){ for (int i = 0; i < N; i++){ host_test[j*N + i] = host_in[i*N + j]; } } bool passed = true; for (int i = 0; i < N*N; i++){ if (host_test[i] != host_out[i]) { passed = false; break; } } if (passed) {printf("Passed. \n");} else {printf("Not passed. \n");} printf("\nblock: %d x %d", dimBlock.y, dimBlock.x); printf("\nmilliseconds: %f", milliseconds); printf("\nBandN: %f GB/s \n", 2*mem_syze/milliseconds/1e6); //free memory free(host_in); free(host_out); free(host_test); hipFree(dev_in); hipFree(dev_out); return 0; }
cc0dd78ae0431a87f1b0b6028df591c8f42c1422.cu
#include <stdio.h> #define N 2048 #define BLOCK_COLUMNS 32 #define BLOCK_ROWS 32 __global__ void transpose_naive(float *dev_out, const float *dev_in) { int x = blockIdx.x * BLOCK_COLUMNS + threadIdx.x; int y = blockIdx.y * BLOCK_ROWS + threadIdx.y; dev_out[x*N + y] = dev_in[y*N + x]; } int main(){ float * host_in, * host_out, * host_test; float * dev_in, * dev_out; int size = N*N; int mem_syze = size * sizeof(float); int i; // timing cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //allocate memory in host and device host_in = (float *)malloc(mem_syze); host_out = (float *)malloc(mem_syze); host_test = (float *)malloc(mem_syze); cudaMalloc((void**)&dev_in, mem_syze); cudaMalloc((void**)&dev_out, mem_syze); //fill matrix in host for(i = 0; i<size; ++i){ host_in[i] = i; host_test[i] = 0; } //transfer matrix from host to device cudaMemcpy(dev_in, host_in, mem_syze, cudaMemcpyHostToDevice); //transpose matrix in device dim3 dimGrid(N/BLOCK_COLUMNS, N/BLOCK_ROWS, 1); dim3 dimBlock(BLOCK_COLUMNS, BLOCK_ROWS, 1); cudaEventRecord(start); transpose_naive<<< dimGrid, dimBlock >>>(dev_out, dev_in); cudaEventRecord(stop); // transfer matrix from device to host cudaMemcpy(host_out, dev_out, mem_syze, cudaMemcpyDeviceToHost); // correctness test //printf("\ncorrecteness: %d \n", correct(host_in, host_out)); //showing BandN cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); for (int j = 0; j < N; j++){ for (int i = 0; i < N; i++){ host_test[j*N + i] = host_in[i*N + j]; } } bool passed = true; for (int i = 0; i < N*N; i++){ if (host_test[i] != host_out[i]) { passed = false; break; } } if (passed) {printf("Passed. \n");} else {printf("Not passed. \n");} printf("\nblock: %d x %d", dimBlock.y, dimBlock.x); printf("\nmilliseconds: %f", milliseconds); printf("\nBandN: %f GB/s \n", 2*mem_syze/milliseconds/1e6); //free memory free(host_in); free(host_out); free(host_test); cudaFree(dev_in); cudaFree(dev_out); return 0; }
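/*
 * The kernel above is the "naive" transpose: global reads are coalesced but global
 * writes are strided by N. A common follow-up, sketched here with the same 32x32 block
 * shape, stages a tile in shared memory so both the read and the write are coalesced;
 * the +1 padding column avoids shared-memory bank conflicts. The kernel name
 * transpose_tiled is illustrative and not part of the file above; launch it with
 * dim3(TILE_DIM, TILE_DIM) blocks and a ((n+31)/32, (n+31)/32) grid.
 */
#define TILE_DIM 32

__global__ void transpose_tiled(float *dev_out, const float *dev_in, int n) {
    __shared__ float tile[TILE_DIM][TILE_DIM + 1]; // +1 column to dodge bank conflicts

    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    if (x < n && y < n)
        tile[threadIdx.y][threadIdx.x] = dev_in[y * n + x]; // coalesced read

    __syncthreads();

    // swap the block indices so the write is coalesced along the output row
    x = blockIdx.y * TILE_DIM + threadIdx.x;
    y = blockIdx.x * TILE_DIM + threadIdx.y;
    if (x < n && y < n)
        dev_out[y * n + x] = tile[threadIdx.x][threadIdx.y]; // coalesced write
}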
a7106f9d13b78f4802301c2c383b9c2c77add653.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Este codigo calcula (u[i]-u)/u_d_m en el dispositivo GPU // y mide el tiempo de ejecucion #include <stdio.h> #include <string.h> #include "soporte.cu" __global__ void operacionKernelGPU(float* u, float* lu, float u_m, float u_d, int n) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < n) lu[idx] = (u[idx]-u_m)/u_d; } int main(int argc, char**argv) { Timer timer; FILE *f; if (argc > 1) { char fname[30]; strcpy(fname,"gpu_"); strcat(fname,argv[1]); f = fopen(fname, "w"); if (f == NULL) { printf("Error abriendo archivo\n"); exit(1); } } float total_time = 0.0; printf("\nInicializando...\n"); fflush(stdout); startTime(&timer); unsigned int n; if(argc == 1) { n = 25; } else if(argc == 2) { n = atoi(argv[1]); } else { printf("\n Parametros no validos!" "\n Uso: ./derivCPU # Vector of longitud 10,000" "\n Uso: ./derivCPU <m> # Vector of longitud m" "\n"); exit(0); } // Asignando memoria a las variables u_h, lu_h ---------------------------------------------------------- const int u_m = 0; const int u_d = 255; int size = n*sizeof(float); float* h_u = (float*) malloc( size ); for (unsigned int i=0; i < n; i++) { h_u[i] = i; } float* h_lu = (float*) malloc( size ); stopTime(&timer); float t1 = elapsedTime(timer); printf("Longitud del vector: %d\n", n); printf("t1 = %f s\n", t1); if (argc > 1) fprintf(f, "%f\n", t1); total_time = total_time + t1; // Declaracion de vectores/arreglos a usarse en el GPU ---------------------------------------------------------- printf("Asignando memoria en el GPU...\n"); fflush(stdout); startTime(&timer); float* d_u; hipMalloc((void**)&d_u, size); float* d_lu; hipMalloc((void**)&d_lu, size); // hipDeviceSynchronize(); stopTime(&timer); t1 = elapsedTime(timer); printf("t2 = %f s\n", t1); if (argc > 1) fprintf(f, "%f\n", t1); total_time = total_time + t1; // Copiando data del host al GPU ---------------------------------------------------------- printf("Copiando datos desde el host al GPU...\n"); fflush(stdout); startTime(&timer); hipMemcpy( d_u, h_u, size, hipMemcpyHostToDevice ); stopTime(&timer); t1 = elapsedTime(timer); printf("t3 = %f s\n", t1); if (argc > 1) fprintf(f, "%f\n", t1); total_time = total_time + t1; // Launch kernel ---------------------------------------------------------- printf("Lanzando kernel...\n"); fflush(stdout); startTime(&timer); hipLaunchKernelGGL(( operacionKernelGPU), dim3(ceil(n/1024.0)), dim3(1024), 0, 0, d_u, d_lu, u_m, u_d, n); stopTime(&timer); t1 = elapsedTime(timer); printf("t4 = %f s\n", t1); if (argc > 1) fprintf(f, "%f\n", t1); total_time = total_time + t1; // Copiando variables del GPU al host ---------------------------------------------------------- printf("Copiando datos desde el GPU al host...\n"); fflush(stdout); startTime(&timer); hipMemcpy( h_lu, d_lu, size, hipMemcpyDeviceToHost ); stopTime(&timer); t1 = elapsedTime(timer); printf("t5 = %f s\n", t1); if (argc > 1) fprintf(f, "%f\n", t1); total_time = total_time + t1; // verificando resultados ---------------------------------------------------------- const float toleranciaRelativa = 1e-4; for(int i=0; i<n; i++) { float operBasic = (h_u[i]-u_m)/u_d; float errorRelativo = (operBasic-h_lu[i])/operBasic; if (errorRelativo > toleranciaRelativa || errorRelativo < -toleranciaRelativa) { printf("PRUEBA FALLIDA\n\n"); exit(0); } } printf("PRUEBA SUPERADA\n"); printf("tf = %f s\n", total_time); if (argc > 1) fprintf(f, "%f\n", total_time); if (n==25) { for(int i=0; i<n; i++) { 
printf("%10.8f\t",h_lu[i]); } printf("\n"); } free(h_u); free(h_lu); hipFree(d_u); hipFree(d_lu); if (argc > 1) fclose(f); }
a7106f9d13b78f4802301c2c383b9c2c77add653.cu
// Este codigo calcula (u[i]-u)/u_d_m en el dispositivo GPU // y mide el tiempo de ejecucion #include <stdio.h> #include <string.h> #include "soporte.cu" __global__ void operacionKernelGPU(float* u, float* lu, float u_m, float u_d, int n) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < n) lu[idx] = (u[idx]-u_m)/u_d; } int main(int argc, char**argv) { Timer timer; FILE *f; if (argc > 1) { char fname[30]; strcpy(fname,"gpu_"); strcat(fname,argv[1]); f = fopen(fname, "w"); if (f == NULL) { printf("Error abriendo archivo\n"); exit(1); } } float total_time = 0.0; printf("\nInicializando...\n"); fflush(stdout); startTime(&timer); unsigned int n; if(argc == 1) { n = 25; } else if(argc == 2) { n = atoi(argv[1]); } else { printf("\n Parametros no validos!" "\n Uso: ./derivCPU # Vector of longitud 10,000" "\n Uso: ./derivCPU <m> # Vector of longitud m" "\n"); exit(0); } // Asignando memoria a las variables u_h, lu_h ---------------------------------------------------------- const int u_m = 0; const int u_d = 255; int size = n*sizeof(float); float* h_u = (float*) malloc( size ); for (unsigned int i=0; i < n; i++) { h_u[i] = i; } float* h_lu = (float*) malloc( size ); stopTime(&timer); float t1 = elapsedTime(timer); printf("Longitud del vector: %d\n", n); printf("t1 = %f s\n", t1); if (argc > 1) fprintf(f, "%f\n", t1); total_time = total_time + t1; // Declaracion de vectores/arreglos a usarse en el GPU ---------------------------------------------------------- printf("Asignando memoria en el GPU...\n"); fflush(stdout); startTime(&timer); float* d_u; cudaMalloc((void**)&d_u, size); float* d_lu; cudaMalloc((void**)&d_lu, size); // cudaDeviceSynchronize(); stopTime(&timer); t1 = elapsedTime(timer); printf("t2 = %f s\n", t1); if (argc > 1) fprintf(f, "%f\n", t1); total_time = total_time + t1; // Copiando data del host al GPU ---------------------------------------------------------- printf("Copiando datos desde el host al GPU...\n"); fflush(stdout); startTime(&timer); cudaMemcpy( d_u, h_u, size, cudaMemcpyHostToDevice ); stopTime(&timer); t1 = elapsedTime(timer); printf("t3 = %f s\n", t1); if (argc > 1) fprintf(f, "%f\n", t1); total_time = total_time + t1; // Launch kernel ---------------------------------------------------------- printf("Lanzando kernel...\n"); fflush(stdout); startTime(&timer); operacionKernelGPU<<<ceil(n/1024.0), 1024>>>(d_u, d_lu, u_m, u_d, n); stopTime(&timer); t1 = elapsedTime(timer); printf("t4 = %f s\n", t1); if (argc > 1) fprintf(f, "%f\n", t1); total_time = total_time + t1; // Copiando variables del GPU al host ---------------------------------------------------------- printf("Copiando datos desde el GPU al host...\n"); fflush(stdout); startTime(&timer); cudaMemcpy( h_lu, d_lu, size, cudaMemcpyDeviceToHost ); stopTime(&timer); t1 = elapsedTime(timer); printf("t5 = %f s\n", t1); if (argc > 1) fprintf(f, "%f\n", t1); total_time = total_time + t1; // verificando resultados ---------------------------------------------------------- const float toleranciaRelativa = 1e-4; for(int i=0; i<n; i++) { float operBasic = (h_u[i]-u_m)/u_d; float errorRelativo = (operBasic-h_lu[i])/operBasic; if (errorRelativo > toleranciaRelativa || errorRelativo < -toleranciaRelativa) { printf("PRUEBA FALLIDA\n\n"); exit(0); } } printf("PRUEBA SUPERADA\n"); printf("tf = %f s\n", total_time); if (argc > 1) fprintf(f, "%f\n", total_time); if (n==25) { for(int i=0; i<n; i++) { printf("%10.8f\t",h_lu[i]); } printf("\n"); } free(h_u); free(h_lu); cudaFree(d_u); cudaFree(d_lu); if (argc > 1) 
fclose(f); }
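/*
 * The launch above computes the grid size with ceil(n/1024.0) in floating point. An
 * equivalent integer-only sketch is shown below; the helper name grid_for is
 * illustrative only.
 */
#include <cuda_runtime.h>

static inline dim3 grid_for(unsigned int n, unsigned int block) {
    return dim3((n + block - 1) / block); // integer ceiling of n / block
}

/* usage sketch, assuming the kernel signature from the file above:
 *   const unsigned int block = 1024;
 *   operacionKernelGPU<<<grid_for(n, block), block>>>(d_u, d_lu, u_m, u_d, n);
 */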
27ed1ca2a5224e23f478c9b782ae698cef574159.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "_drop64.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *x = NULL; hipMalloc(&x, XSIZE*YSIZE); double *xmask = NULL; hipMalloc(&xmask, XSIZE*YSIZE); double dropout = 1; double scale = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( _drop64), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,xmask,dropout,scale); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( _drop64), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,xmask,dropout,scale); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( _drop64), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,xmask,dropout,scale); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
27ed1ca2a5224e23f478c9b782ae698cef574159.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "_drop64.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); double *xmask = NULL; cudaMalloc(&xmask, XSIZE*YSIZE); double dropout = 1; double scale = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); _drop64<<<gridBlock,threadBlock>>>(n,x,xmask,dropout,scale); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { _drop64<<<gridBlock,threadBlock>>>(n,x,xmask,dropout,scale); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { _drop64<<<gridBlock,threadBlock>>>(n,x,xmask,dropout,scale); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
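/*
 * The harness above times the launch loop with std::chrono but does not synchronize the
 * device before reading the end timestamp, so it largely measures kernel enqueue cost.
 * Below is a sketch of device-side timing with CUDA events; time_launches_ms is an
 * illustrative name and the launch callable stands in for whatever kernel is being
 * benchmarked.
 */
#include <cuda_runtime.h>

template <typename LaunchFn>
float time_launches_ms(LaunchFn launch, int iterations) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int i = 0; i < iterations; ++i)
        launch();                 // enqueue one kernel launch per iteration
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);   // wait until all enqueued work has finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;                    // total GPU time for all iterations, in milliseconds
}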
6e33a5637d744784a313542c8e2062ee8bf433c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2019,2020,2021 Sony Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/gather_nd.hpp> #include <nbla/cuda/utils/atomic_add.cuh> #include <nbla/utils/nd_index.hpp> #include <nbla/variable.hpp> namespace nbla { namespace gather_nd_cuda { template <typename T> __global__ void forward(const int y_size, T *y_data, const int x_size, const T *x_data, const int *x_shape, const int *x_stride, const int *idx_data, const int idx_rows, const int idx_cols) { NBLA_CUDA_KERNEL_LOOP(tid, y_size) { auto slice_length = y_size / idx_cols; auto index_column = tid / slice_length; auto x_offset = tid - index_column * slice_length; for (int m = 0; m < idx_rows; m++) { auto index = idx_data[m * idx_cols + index_column]; x_offset += (index < 0 ? x_shape[m] + index : index) * x_stride[m]; } // The idx_data comes from a Variable that may be different at any forward // call. Unlike the CPU code we do not want to check the error in device // code (that would imply raising a trap plus always synchronization and // costly recovery). Still we don't want to read from unaccessible memory. if (x_offset < x_size) { y_data[tid] = x_data[x_offset]; } } } template <typename T> __global__ void backward(const int y_size, const T *y_grad, const int x_size, T *x_grad, const int *x_shape, const int *x_stride, const int *idx_data, const int idx_rows, const int idx_cols) { NBLA_CUDA_KERNEL_LOOP(tid, y_size) { auto slice_length = y_size / idx_cols; auto index_column = tid / slice_length; auto x_offset = tid - index_column * slice_length; for (int m = 0; m < idx_rows; m++) { auto index = idx_data[m * idx_cols + index_column]; x_offset += (index < 0 ? 
x_shape[m] + index : index) * x_stride[m]; } if (x_offset < x_size) { atomic_add(&x_grad[x_offset], y_grad[tid]); } } } template <typename T> __global__ void accum_grad(const int size, const int *idx, const T *y_grad, T *x_grad) { NBLA_CUDA_KERNEL_LOOP(i, size) { atomic_add(x_grad + idx[i], y_grad[i]); } } } // namespace gather_nd_cuda template <typename T> void GatherNdCuda<T>::setup_impl(const Variables &inputs, const Variables &outputs) { GatherNd<T>::setup_impl(inputs, outputs); Shape_t src_meta_shape = {2 * inputs[0]->ndim()}; src_meta_.reshape(src_meta_shape, true); Context cpu_ctx{{"cpu:float"}, "CpuCachedArray", "0"}; auto ptr = src_meta_.cast_data_and_get_pointer<int>(cpu_ctx, true); for (auto s : inputs[0]->shape()) { *ptr++ = static_cast<int>(s); } for (auto s : inputs[0]->strides()) { *ptr++ = static_cast<int>(s); } } template <typename T> void GatherNdCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); auto src = inputs[0]->get_data_pointer<Tcu>(this->ctx_); auto idx = inputs[1]->get_data_pointer<int>(this->ctx_); auto dst = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true); auto idx_rows = static_cast<int>(inputs[1]->shape().at(0)); auto idx_cols = static_cast<int>(ndi::inner_size(inputs[1]->shape(), 1)); auto src_shape_ptr = src_meta_.get_data_pointer<int>(this->ctx_); auto src_stride_ptr = src_shape_ptr + inputs[0]->ndim(); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(gather_nd_cuda::forward, outputs[0]->size(), dst, inputs[0]->size(), src, src_shape_ptr, src_stride_ptr, idx, idx_rows, idx_cols); } template <typename T> void GatherNdCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!propagate_down[0]) { return; } cuda_set_device(this->device_); if (!accum[0]) { inputs[0]->grad()->zero(); } auto g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); auto g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, false); auto idx = inputs[1]->get_data_pointer<int>(this->ctx_); auto idx_rows = static_cast<int>(inputs[1]->shape().at(0)); auto idx_cols = static_cast<int>(ndi::inner_size(inputs[1]->shape(), 1)); auto x_shape_ptr = src_meta_.get_data_pointer<int>(this->ctx_); auto x_stride_ptr = x_shape_ptr + inputs[0]->ndim(); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(gather_nd_cuda::backward, outputs[0]->size(), g_y, inputs[0]->size(), g_x, x_shape_ptr, x_stride_ptr, idx, idx_rows, idx_cols); } } // namespace nbla
6e33a5637d744784a313542c8e2062ee8bf433c4.cu
// Copyright 2019,2020,2021 Sony Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/gather_nd.hpp> #include <nbla/cuda/utils/atomic_add.cuh> #include <nbla/utils/nd_index.hpp> #include <nbla/variable.hpp> namespace nbla { namespace gather_nd_cuda { template <typename T> __global__ void forward(const int y_size, T *y_data, const int x_size, const T *x_data, const int *x_shape, const int *x_stride, const int *idx_data, const int idx_rows, const int idx_cols) { NBLA_CUDA_KERNEL_LOOP(tid, y_size) { auto slice_length = y_size / idx_cols; auto index_column = tid / slice_length; auto x_offset = tid - index_column * slice_length; for (int m = 0; m < idx_rows; m++) { auto index = idx_data[m * idx_cols + index_column]; x_offset += (index < 0 ? x_shape[m] + index : index) * x_stride[m]; } // The idx_data comes from a Variable that may be different at any forward // call. Unlike the CPU code we do not want to check the error in device // code (that would imply raising a trap plus always synchronization and // costly recovery). Still we don't want to read from unaccessible memory. if (x_offset < x_size) { y_data[tid] = x_data[x_offset]; } } } template <typename T> __global__ void backward(const int y_size, const T *y_grad, const int x_size, T *x_grad, const int *x_shape, const int *x_stride, const int *idx_data, const int idx_rows, const int idx_cols) { NBLA_CUDA_KERNEL_LOOP(tid, y_size) { auto slice_length = y_size / idx_cols; auto index_column = tid / slice_length; auto x_offset = tid - index_column * slice_length; for (int m = 0; m < idx_rows; m++) { auto index = idx_data[m * idx_cols + index_column]; x_offset += (index < 0 ? 
x_shape[m] + index : index) * x_stride[m]; } if (x_offset < x_size) { atomic_add(&x_grad[x_offset], y_grad[tid]); } } } template <typename T> __global__ void accum_grad(const int size, const int *idx, const T *y_grad, T *x_grad) { NBLA_CUDA_KERNEL_LOOP(i, size) { atomic_add(x_grad + idx[i], y_grad[i]); } } } // namespace gather_nd_cuda template <typename T> void GatherNdCuda<T>::setup_impl(const Variables &inputs, const Variables &outputs) { GatherNd<T>::setup_impl(inputs, outputs); Shape_t src_meta_shape = {2 * inputs[0]->ndim()}; src_meta_.reshape(src_meta_shape, true); Context cpu_ctx{{"cpu:float"}, "CpuCachedArray", "0"}; auto ptr = src_meta_.cast_data_and_get_pointer<int>(cpu_ctx, true); for (auto s : inputs[0]->shape()) { *ptr++ = static_cast<int>(s); } for (auto s : inputs[0]->strides()) { *ptr++ = static_cast<int>(s); } } template <typename T> void GatherNdCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); auto src = inputs[0]->get_data_pointer<Tcu>(this->ctx_); auto idx = inputs[1]->get_data_pointer<int>(this->ctx_); auto dst = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true); auto idx_rows = static_cast<int>(inputs[1]->shape().at(0)); auto idx_cols = static_cast<int>(ndi::inner_size(inputs[1]->shape(), 1)); auto src_shape_ptr = src_meta_.get_data_pointer<int>(this->ctx_); auto src_stride_ptr = src_shape_ptr + inputs[0]->ndim(); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(gather_nd_cuda::forward, outputs[0]->size(), dst, inputs[0]->size(), src, src_shape_ptr, src_stride_ptr, idx, idx_rows, idx_cols); } template <typename T> void GatherNdCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!propagate_down[0]) { return; } cuda_set_device(this->device_); if (!accum[0]) { inputs[0]->grad()->zero(); } auto g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); auto g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, false); auto idx = inputs[1]->get_data_pointer<int>(this->ctx_); auto idx_rows = static_cast<int>(inputs[1]->shape().at(0)); auto idx_cols = static_cast<int>(ndi::inner_size(inputs[1]->shape(), 1)); auto x_shape_ptr = src_meta_.get_data_pointer<int>(this->ctx_); auto x_stride_ptr = x_shape_ptr + inputs[0]->ndim(); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(gather_nd_cuda::backward, outputs[0]->size(), g_y, inputs[0]->size(), g_x, x_shape_ptr, x_stride_ptr, idx, idx_rows, idx_cols); } } // namespace nbla
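/*
 * A small host-side sketch of the offset arithmetic used by the gather_nd kernels above:
 * one index tuple (one entry per leading axis, negative values counted from the end of
 * that axis) is folded into a flat offset using the stored shape and stride arrays.
 * The function name flat_offset is illustrative only.
 */
static int flat_offset(const int *index, const int *shape, const int *stride,
                       int idx_rows, int inner_offset) {
    int offset = inner_offset;          // position inside the gathered inner slice
    for (int m = 0; m < idx_rows; ++m) {
        int ix = index[m];
        if (ix < 0) ix += shape[m];     // wrap negative indices to the end of the axis
        offset += ix * stride[m];
    }
    return offset;                      // caller should still bounds-check against x_size
}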
a7852f0402109e833c58e47936ddae65e96355b9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <algorithm> #define FILTER_WIDTH 3 __constant__ int dc_xFilter[FILTER_WIDTH * FILTER_WIDTH]; __constant__ int dc_yFilter[FILTER_WIDTH * FILTER_WIDTH]; #define CHECK(call){\ const hipError_t error = call;\ if (error != hipSuccess){\ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);\ fprintf(stderr, "code: %d, reason: %s\n", error, hipGetErrorString(error));\ exit(EXIT_FAILURE);\ }\ } struct GpuTimer{ hipEvent_t start; hipEvent_t stop; GpuTimer(){ hipEventCreate(&start); hipEventCreate(&stop); } ~GpuTimer(){ hipEventDestroy(start); hipEventDestroy(stop); } void Start(){ hipEventRecord(start, 0); hipEventSynchronize(start); } void Stop(){ hipEventRecord(stop, 0); } float Eplapsed(){ float eplapsed; hipEventSynchronize(stop); hipEventElapsedTime(&eplapsed, start, stop); return eplapsed; } }; void readRGBPnm (char *fileName, int &width, int &height, uchar3 *&pixels){ FILE *f = fopen(fileName, "r"); if (f == NULL){ printf("Cannot read %s\n", fileName); exit(EXIT_FAILURE); } char type[3]; fscanf(f, "%s", type); // Check the type of input img if (strcmp(type, "P3") != 0){ fclose(f); printf("Cannot read %s\n", fileName); exit(EXIT_FAILURE); } fscanf(f, "%i", &width); fscanf(f, "%i", &height); int maxVal; fscanf(f, "%i", &maxVal); // Assume 1 byte per value if (maxVal > 255){ fclose(f); printf("Cannot read %s\n", fileName); exit(EXIT_FAILURE); } pixels = (uchar3 *)malloc(width * height * sizeof(uchar3)); for (int i = 0; i< width * height; i++){ fscanf(f, "%hhu%hhu%hhu", &pixels[i].x, &pixels[i].y, &pixels[i].z); } fclose(f); } void writeRGBPnm (const uchar3 *pixels, int width, int height, char *fileName){ FILE *f = fopen(fileName, "w"); if (f == NULL){ printf("Cannot write %s\n", fileName); exit(EXIT_FAILURE); } fprintf(f, "P3\n%i\n%i\n255\n", width, height); for (int i = 0; i < width * height; i++){ fprintf(f, "%hhu\n%hhu\n%hhu\n", pixels[i].x, pixels[i].y, pixels[i].z); } fclose(f); } void writeGrayScalePnm (int *pixels, int width, int height, char *fileName){ FILE *f = fopen(fileName, "w"); if (f == NULL){ printf("Cannot write %s\n", fileName); exit(EXIT_FAILURE); } fprintf(f, "P2\n%i\n%i\n255\n", width, height); for (int i = 0; i < width * height; i++){ fprintf(f, "%hhu\n", pixels[i]); } fclose(f); } void writeMatrixTxt (int *pixels, int width, int height, char *fileName){ FILE *f = fopen(fileName, "w"); if (f == NULL){ printf("Cannot write %s\n", fileName); exit(EXIT_FAILURE); } for (int i = 0; i < height; i++){ for (int j = 0; j < width; j++){ fprintf(f, "%d ", pixels[i * width + j]); } fprintf(f, "\n"); } fclose(f); } void initSobelFilter(int *filter, bool horizontal){ int filterWidth = FILTER_WIDTH; int val = 0; int margin = filterWidth / 2; for (int filterR = 0; filterR < filterWidth; filterR++){ for (int filterC = 0; filterC < filterWidth; filterC++){ if (horizontal == true){ if (filterC < margin){ val = 1; } else if (filterC == margin){ val = 0; } else{ val = -1; } if (filterR == margin){ val *= 2; } } else{ if (filterR < margin){ val = 1; } else if (filterR == margin){ val = 0; } else{ val = -1; } if (filterC == margin){ val *= 2; } } filter[filterR * filterWidth + filterC] = val; } } } void convertRgb2Gray (const uchar3 *in, int n, int *out){ for (int i = 0; i < n; i++){ out[i] = 0.299f * in[i].x + 0.587f * in[i].y + 0.114f * in[i].z; } } void getPixelsImportance (int *in, int width, int height, int *xFilter, int *yFilter, int 
filterWidth, int *out){ int margin = filterWidth / 2; for (int col = 0; col < width; col++){ for (int row = 0; row < height; row++){ int curIdx = row * width + col; float xSum = 0, ySum = 0; for (int filterRow = -margin; filterRow <= margin; filterRow++){ for (int filterCol = -margin; filterCol <= margin; filterCol++){ int filterIdx = (filterRow + margin) * filterWidth + filterCol + margin; int dx = min(width - 1, max(0, col + filterCol)); int dy = min(height - 1, max(0, row + filterRow)); int idx = dy * width + dx; xSum += in[idx] * xFilter[filterIdx]; ySum += in[idx] * yFilter[filterIdx]; } } out[curIdx] = abs(xSum) + abs(ySum); } } } void getLeastImportantPixels (int *in, int width, int height, int *out){ int lastRow = (height - 1) * width; memcpy(out + lastRow, in + lastRow, width * sizeof(int)); for (int row = height - 2; row >= 0; row--){ int below = row + 1; for (int col = 0; col < width; col++ ){ int idx = row * width + col; int leftCol = max(0, col - 1); int rightCol = min(width - 1, col + 1); int belowIdx = below * width + col; int leftBelowIdx = below * width + leftCol; int rightBelowIdx = below * width + rightCol; out[idx] = min(out[belowIdx], min(out[leftBelowIdx], out[rightBelowIdx])) + in[idx]; } } } void getSeamAt (int *in, int width, int height, int *out, int col){ out[0] = col; for (int row = 1; row < height; row++){ int col = out[row - 1]; int idx = row * width + col; int leftCol = max(0, col - 1); int rightCol = min(width - 1, col + 1); int leftIdx = row * width + leftCol; int rightIdx = row * width + rightCol; if (in[leftIdx] < in[idx]){ if (in[leftIdx] < in[rightIdx]) out[row] = leftCol; else out[row] = rightCol; } else{ if (in[idx] < in[rightIdx]) out[row] = col; else out[row] = rightCol; } } } void getLeastImportantSeam (int *in, int width, int height, int *out){ int minCol = 0; for (int i = 0; i < width; i++){ if (in[i] < in[minCol]) minCol = i; } // printf("min col %d-%d\n", minCol, in[minCol]); getSeamAt(in, width, height, out, minCol); } void removeSeam (const uchar3 *in, int width, int height, uchar3 *out, int *seam){ int newWidth = width - 1; for (int row = 0; row < height; row++){ int col = seam[row]; memcpy(out + row * newWidth, in + row * width, col * sizeof(uchar3)); int nextIdxOut = row * newWidth + col; int nextIdxIn = row * width + col + 1; memcpy(out + nextIdxOut, in + nextIdxIn, (newWidth - col) * sizeof(uchar3)); } } void seamCarvingHost(const uchar3 *in, int width, int height, uchar3 *out, int *xFilter, int *yFilter, int filterWidth){ // convert image to grayscale int *grayScalePixels = (int *)malloc(width * height * sizeof(int)); convertRgb2Gray(in, width * height, grayScalePixels); // edge detection int *pixelsImportance = (int *)malloc(width * height * sizeof(int)); getPixelsImportance(grayScalePixels, width, height, xFilter, yFilter, filterWidth, pixelsImportance); // find the least important seam int *leastPixelsImportance = (int *)malloc(width * height * sizeof(int)); getLeastImportantPixels(pixelsImportance, width, height, leastPixelsImportance); int *leastImportantSeam = (int *)malloc(height * sizeof(int)); getLeastImportantSeam(leastPixelsImportance, width, height, leastImportantSeam); // remove the least important seam removeSeam(in, width, height, out, leastImportantSeam); // free memories free(grayScalePixels); free(pixelsImportance); free(leastPixelsImportance); free(leastImportantSeam); } __global__ void convertRgb2GrayKernel(uchar3 *in, int width, int height, int *out){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = 
blockIdx.y * blockDim.y + threadIdx.y; if (col < width && row < height){ int idx = row * width + col; out[idx] = 0.299f * in[idx].x + 0.587f * in[idx].y + 0.114f * in[idx].z; } } __global__ void getPixelsImportanceKernel (int *in, int width, int height, int filterWidth, int *out){ int col = blockDim.x * blockIdx.x + threadIdx.x; int row = blockDim.y * blockIdx.y + threadIdx.y; if (col < width && row < height){ int margin = filterWidth / 2; int curIdx = row * width + col; float xSum = 0, ySum = 0; for (int filterRow = -margin; filterRow <= margin; filterRow++){ for (int filterCol = -margin; filterCol <= margin; filterCol++){ int filterIdx = (filterRow + margin) * filterWidth + filterCol + margin; int dx = min(width - 1, max(0, col + filterCol)); int dy = min(height - 1, max(0, row + filterRow)); int idx = dy * width + dx; xSum += in[idx] * dc_xFilter[filterIdx]; ySum += in[idx] * dc_yFilter[filterIdx]; } } out[curIdx] = abs(xSum) + abs(ySum); } } __global__ void upTriangle (int *in, int width, int height, int yStart, int yStop, int baseWith, int *out){ int xStart = baseWith * blockIdx.x * blockDim.x + threadIdx.x * baseWith; int xStop = xStart + baseWith - 1; for (int y = yStart; y >= yStop; y--){ for (int x = xStart; x <= xStop; x++){ if (x < width){ int idx = y * width + x; int below = (y + 1) * width + x; int left = (y + 1) * width + max(0, x - 1); int right = (y + 1) * width + min(width - 1, x + 1); out[idx] = in[idx] + min(out[below], min(out[left], out[right])); } } xStart += 1; xStop -= 1; } } __global__ void downTriangle (int *in, int width, int height, int yStart, int yStop, int baseWith, int *out){ int xStop = baseWith * (threadIdx.x + blockDim.x * blockIdx.x); int xStart = xStop - 1; for (int y = yStart; y >= yStop; y--){ for (int x = xStart; x <= xStop; x++){ if (x >= 0 && x < width){ int idx = y * width + x; int below = (y + 1) * width + x; int left = (y + 1) * width + max(0, x - 1); int right = (y + 1) * width + min(width - 1, x + 1); out[idx] = in[idx] + min(out[below], min(out[left], out[right])); } } xStart -= 1; xStop += 1; } } __global__ void getMinColSeam (int *in, int width, int *out){ extern __shared__ int s_mem[]; int i = (blockDim.x * blockIdx.x + threadIdx.x) * 2; if (i < width) s_mem[threadIdx.x] = i; if (i + 1 < width) s_mem[threadIdx.x + 1] = i + 1; __syncthreads(); for (int stride = 1; stride < 2 * blockDim.x; stride *= 2){ if (threadIdx.x % stride == 0){ if (i + stride < width){ if (in[s_mem[threadIdx.x]] > in[s_mem[threadIdx.x + stride]]){ s_mem[threadIdx.x] = s_mem[threadIdx.x + stride]; } } } __syncthreads(); } if (threadIdx.x == 0){ out[blockIdx.x] = s_mem[0]; } } void seamCarvingDevice(const uchar3 *in, int width, int height, uchar3 *out, int *xFilter, int *yFilter, int filterWidth, dim3 blockSize, int baseWith){ // prepare some values int lastRowIdx = (height - 1) * width; int stripHeight = baseWith % 2 == 0 ? 
baseWith / 2 + 1 : (baseWith + 1) / 2 + 1; int gridSizeTriangle = (width - 1) / (blockSize.x * baseWith) + 1; int minColGridSize = (width - 1) / (2 * blockSize.x) + 1; size_t dataSize = width * height * sizeof(uchar3); size_t rowSize = width * sizeof(int); size_t grayScaleSize = width * height * sizeof(int); dim3 gridSize((width - 1) / blockSize.x + 1, (height - 1) / blockSize.y + 1); // allocate device memories uchar3 *d_in; int *d_grayScalePixels, *d_pixelsImportance, *d_leastImportantPixels, *d_minCol; CHECK(hipMalloc(&d_in, dataSize)); CHECK(hipMalloc(&d_grayScalePixels, grayScaleSize)); CHECK(hipMalloc(&d_pixelsImportance, grayScaleSize)); CHECK(hipMalloc(&d_leastImportantPixels, grayScaleSize)); CHECK(hipMalloc(&d_minCol, minColGridSize * sizeof(int))); // allocate host memories int *leastPixelsImportance = (int *)malloc(grayScaleSize); int *leastImportantSeam = (int *)malloc(height * sizeof(int)); int *minCol = (int *)malloc(minColGridSize * sizeof(int)); // copy data to device memories CHECK(hipMemcpy(d_in, in, dataSize, hipMemcpyHostToDevice)); // convert image to grayscale hipLaunchKernelGGL(( convertRgb2GrayKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_in, width, height, d_grayScalePixels); CHECK(hipGetLastError()); // edge detection hipLaunchKernelGGL(( getPixelsImportanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_grayScalePixels, width, height, filterWidth, d_pixelsImportance); CHECK(hipGetLastError()); // find the least important pixels CHECK(hipMemcpy(d_leastImportantPixels + lastRowIdx, d_pixelsImportance + lastRowIdx, rowSize, hipMemcpyDeviceToDevice)); for (int y = height - 2; y >= 0; y -= stripHeight){ int yStart = y; int yStop = max(0, yStart - stripHeight + 1); hipLaunchKernelGGL(( upTriangle), dim3(gridSizeTriangle), dim3(blockSize.x), 0, 0, d_pixelsImportance, width, height, yStart, yStop, baseWith, d_leastImportantPixels); yStart = max(0, yStart - 1); yStop = max(0, yStart - stripHeight + 1); hipLaunchKernelGGL(( downTriangle), dim3(gridSizeTriangle + 1), dim3(blockSize.x), 0, 0, d_pixelsImportance, width, height, yStart, yStop, baseWith, d_leastImportantPixels); } CHECK(hipMemcpy(leastPixelsImportance, d_leastImportantPixels, grayScaleSize, hipMemcpyDeviceToHost)); // find the least important seam hipLaunchKernelGGL(( getMinColSeam), dim3(minColGridSize), dim3(blockSize.x), blockSize.x * 2 * sizeof(int), 0, d_leastImportantPixels, width, d_minCol); CHECK(hipGetLastError()); CHECK(hipMemcpy(minCol, d_minCol, minColGridSize * sizeof(int), hipMemcpyDeviceToHost)); int mc = minCol[0]; for (int i = 0; i < minColGridSize; i += 1){ if (leastPixelsImportance[minCol[i]] < leastPixelsImportance[mc]){ mc = minCol[i]; } } getSeamAt(leastPixelsImportance, width, height, leastImportantSeam, mc); // remove the least important seam removeSeam(in, width, height, out, leastImportantSeam); // free memories CHECK(hipFree(d_in)); CHECK(hipFree(d_grayScalePixels)); CHECK(hipFree(d_pixelsImportance)); CHECK(hipFree(d_leastImportantPixels)); CHECK(hipFree(d_minCol)); free(leastPixelsImportance); free(leastImportantSeam); free(minCol); } void seamCarving(const uchar3 *in, int width, int height, uchar3 *out, int newWidth, int *xFilter, int *yFilter, int filterWidth, bool usingDevice=false, dim3 blockSize=dim3(1, 1), int baseWith = 0){ if (usingDevice == false){ printf("\nSeam carving by host\n"); } else{ printf("\nSeam carving by device\n"); // copy x filter, y filter on host to dc_x filter, dc_y filter on device size_t filterSize = filterWidth * filterWidth * sizeof(int); 
CHECK(hipMemcpyToSymbol(dc_xFilter, xFilter, filterSize)); CHECK(hipMemcpyToSymbol(dc_yFilter, yFilter, filterSize)); } GpuTimer timer; timer.Start(); // allocate host memories uchar3 *src = (uchar3 *)malloc(width * height * sizeof(uchar3)); uchar3 *dst = (uchar3 *)malloc(width * height * sizeof(uchar3)); // store the pointer for freeing uchar3 *originalSrc = src; uchar3 *originalDst = dst; // copy input data to src pointer memcpy(src, in, width * height * sizeof(uchar3)); // do the seam carving by decrease width by 1 until newWidth for (int w = width; w > newWidth; w--){ // resize the dst pointer with current width - 1; dst = (uchar3 *)realloc(dst, (w-1) * height * sizeof(uchar3)); // seamCarving the picture if (usingDevice == false){ seamCarvingHost(src, w, height, dst, xFilter, yFilter, filterWidth); } else{ seamCarvingDevice(src, w, height, dst, xFilter, yFilter, filterWidth, blockSize, baseWith); } // swap src and dst uchar3 * temp = src; src = dst; dst = temp; } // copy the output data to the out pointer memcpy(out, src, newWidth * height * sizeof(uchar3)); // free memories free(originalDst); free(originalSrc); timer.Stop(); printf("Time: %.3f ms\n", timer.Eplapsed()); } float computeError (uchar3 *a1, uchar3* a2, int n){ float err = 0; for (int i = 0; i < n; i++){ err += abs((int)a1[i].x - (int)a2[i].x); err += abs((int)a1[i].y - (int)a2[i].y); err += abs((int)a1[i].z - (int)a2[i].z); } err /= (n * 3); return err; } void printError (uchar3 *a1, uchar3 *a2, int width, int height){ float err = computeError(a1, a2, width * height); printf("Error: %f\n", err); } void printDeviceInfo(int codeVer){ hipDeviceProp_t devProv; CHECK(hipGetDeviceProperties(&devProv, 0)); printf("Vesrion of code: %d\n", codeVer); printf("**********GPU info**********\n"); printf("Name: %s\n", devProv.name); printf("Compute capability: %d.%d\n", devProv.major, devProv.minor); printf("Num SMs: %d\n", devProv.multiProcessorCount); printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor); printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize); printf("GMEM: %lu bytes\n", devProv.totalGlobalMem); printf("CMEM: %lu bytes\n", devProv.totalConstMem); printf("L2 cache: %i bytes\n", devProv.l2CacheSize); printf("SMEM / one SM: %lu bytes\n", devProv.sharedMemPerMultiprocessor); printf("****************************\n"); } char *concatStr(const char *s1, const char *s2){ char *result = (char *)malloc(strlen(s1) + strlen(s2) + 1); strcpy(result, s1); strcat(result, s2); return result; } int main (int argc, char **argv){ if (argc != 4 && argc != 6){ printf("The number of arguments is invalid\n"); return EXIT_FAILURE; } int seamCount = atoi(argv[2]); int baseWith = atoi(argv[3]); // Read input image file int width, height; uchar3 *inPixels; readRGBPnm(argv[1], width, height, inPixels); printf("\nImage size (width * height): %i x %i\n", width, height); int newWidth = width - seamCount; if (newWidth <= 0){ printf("The count of removed seams must be smaller than the width of the image"); return EXIT_FAILURE; } printf("\nNew image size (width * height): %i x %i\n", newWidth, height); // print device info int codeVer = 1; printDeviceInfo(codeVer); // init out pointer uchar3 *correctOutPixels = (uchar3 *)malloc(newWidth * height * sizeof(uchar3)); uchar3 *outPixels = (uchar3 *)malloc(newWidth * height * sizeof(uchar3)); // Set up x sobel filter and y sobel filter int filterWidth = FILTER_WIDTH; int *xFilter = (int *)malloc(filterWidth * filterWidth * sizeof(int)); int *yFilter 
= (int *)malloc(filterWidth * filterWidth * sizeof(int)); initSobelFilter(xFilter, true); initSobelFilter(yFilter, false); // Seam carving not using device seamCarving(inPixels, width, height, correctOutPixels, newWidth, xFilter, yFilter, filterWidth); // get input block size dim3 blockSize(32, 32); //default if (argc == 5){ blockSize.x = atoi(argv[3]); blockSize.y = atoi(argv[4]); } // Seam carving using device seamCarving(inPixels, width, height, outPixels, newWidth, xFilter, yFilter, filterWidth, true, blockSize, baseWith); printError(correctOutPixels, outPixels, newWidth, height); // Write results to files char *outFileNameBase = strtok(argv[1], "."); //get rid of extension writeRGBPnm(correctOutPixels, newWidth, height, concatStr(outFileNameBase, "_host.pnm")); writeRGBPnm(outPixels, newWidth, height, concatStr(outFileNameBase, "_device.pnm")); // Free memories free(inPixels); free(xFilter); free(yFilter); free(correctOutPixels); free(outPixels); }
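/*
 * For comparison with getMinColSeam above, a compact block-level argmin sketch over one
 * row of importance values. It assumes blockDim.x is a power of two and keeps one
 * candidate column per thread in shared memory (launch with blockDim.x * sizeof(int)
 * bytes of dynamic shared memory). The kernel name and launch shape are illustrative
 * only; a second pass over blockArgmin picks the global minimum, as the host loop in the
 * file above already does.
 */
__global__ void argmin_row(const int *row, int width, int *blockArgmin) {
    extern __shared__ int s_col[];

    // each thread starts from the best column of its strided slice of the row
    int best = -1;
    for (int c = blockIdx.x * blockDim.x + threadIdx.x; c < width;
         c += gridDim.x * blockDim.x) {
        if (best < 0 || row[c] < row[best]) best = c;
    }
    s_col[threadIdx.x] = best;
    __syncthreads();

    // tree reduction: keep the column holding the smaller value at each step
    for (int stride = blockDim.x / 2; stride > 0; stride /= 2) {
        if (threadIdx.x < stride) {
            int a = s_col[threadIdx.x], b = s_col[threadIdx.x + stride];
            if (a < 0 || (b >= 0 && row[b] < row[a])) s_col[threadIdx.x] = b;
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) blockArgmin[blockIdx.x] = s_col[0];
}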
a7852f0402109e833c58e47936ddae65e96355b9.cu
#include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <algorithm> #define FILTER_WIDTH 3 __constant__ int dc_xFilter[FILTER_WIDTH * FILTER_WIDTH]; __constant__ int dc_yFilter[FILTER_WIDTH * FILTER_WIDTH]; #define CHECK(call){\ const cudaError_t error = call;\ if (error != cudaSuccess){\ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);\ fprintf(stderr, "code: %d, reason: %s\n", error, cudaGetErrorString(error));\ exit(EXIT_FAILURE);\ }\ } struct GpuTimer{ cudaEvent_t start; cudaEvent_t stop; GpuTimer(){ cudaEventCreate(&start); cudaEventCreate(&stop); } ~GpuTimer(){ cudaEventDestroy(start); cudaEventDestroy(stop); } void Start(){ cudaEventRecord(start, 0); cudaEventSynchronize(start); } void Stop(){ cudaEventRecord(stop, 0); } float Eplapsed(){ float eplapsed; cudaEventSynchronize(stop); cudaEventElapsedTime(&eplapsed, start, stop); return eplapsed; } }; void readRGBPnm (char *fileName, int &width, int &height, uchar3 *&pixels){ FILE *f = fopen(fileName, "r"); if (f == NULL){ printf("Cannot read %s\n", fileName); exit(EXIT_FAILURE); } char type[3]; fscanf(f, "%s", type); // Check the type of input img if (strcmp(type, "P3") != 0){ fclose(f); printf("Cannot read %s\n", fileName); exit(EXIT_FAILURE); } fscanf(f, "%i", &width); fscanf(f, "%i", &height); int maxVal; fscanf(f, "%i", &maxVal); // Assume 1 byte per value if (maxVal > 255){ fclose(f); printf("Cannot read %s\n", fileName); exit(EXIT_FAILURE); } pixels = (uchar3 *)malloc(width * height * sizeof(uchar3)); for (int i = 0; i< width * height; i++){ fscanf(f, "%hhu%hhu%hhu", &pixels[i].x, &pixels[i].y, &pixels[i].z); } fclose(f); } void writeRGBPnm (const uchar3 *pixels, int width, int height, char *fileName){ FILE *f = fopen(fileName, "w"); if (f == NULL){ printf("Cannot write %s\n", fileName); exit(EXIT_FAILURE); } fprintf(f, "P3\n%i\n%i\n255\n", width, height); for (int i = 0; i < width * height; i++){ fprintf(f, "%hhu\n%hhu\n%hhu\n", pixels[i].x, pixels[i].y, pixels[i].z); } fclose(f); } void writeGrayScalePnm (int *pixels, int width, int height, char *fileName){ FILE *f = fopen(fileName, "w"); if (f == NULL){ printf("Cannot write %s\n", fileName); exit(EXIT_FAILURE); } fprintf(f, "P2\n%i\n%i\n255\n", width, height); for (int i = 0; i < width * height; i++){ fprintf(f, "%hhu\n", pixels[i]); } fclose(f); } void writeMatrixTxt (int *pixels, int width, int height, char *fileName){ FILE *f = fopen(fileName, "w"); if (f == NULL){ printf("Cannot write %s\n", fileName); exit(EXIT_FAILURE); } for (int i = 0; i < height; i++){ for (int j = 0; j < width; j++){ fprintf(f, "%d ", pixels[i * width + j]); } fprintf(f, "\n"); } fclose(f); } void initSobelFilter(int *filter, bool horizontal){ int filterWidth = FILTER_WIDTH; int val = 0; int margin = filterWidth / 2; for (int filterR = 0; filterR < filterWidth; filterR++){ for (int filterC = 0; filterC < filterWidth; filterC++){ if (horizontal == true){ if (filterC < margin){ val = 1; } else if (filterC == margin){ val = 0; } else{ val = -1; } if (filterR == margin){ val *= 2; } } else{ if (filterR < margin){ val = 1; } else if (filterR == margin){ val = 0; } else{ val = -1; } if (filterC == margin){ val *= 2; } } filter[filterR * filterWidth + filterC] = val; } } } void convertRgb2Gray (const uchar3 *in, int n, int *out){ for (int i = 0; i < n; i++){ out[i] = 0.299f * in[i].x + 0.587f * in[i].y + 0.114f * in[i].z; } } void getPixelsImportance (int *in, int width, int height, int *xFilter, int *yFilter, int filterWidth, int *out){ int margin = filterWidth / 2; for (int col = 0; col 
< width; col++){ for (int row = 0; row < height; row++){ int curIdx = row * width + col; float xSum = 0, ySum = 0; for (int filterRow = -margin; filterRow <= margin; filterRow++){ for (int filterCol = -margin; filterCol <= margin; filterCol++){ int filterIdx = (filterRow + margin) * filterWidth + filterCol + margin; int dx = min(width - 1, max(0, col + filterCol)); int dy = min(height - 1, max(0, row + filterRow)); int idx = dy * width + dx; xSum += in[idx] * xFilter[filterIdx]; ySum += in[idx] * yFilter[filterIdx]; } } out[curIdx] = abs(xSum) + abs(ySum); } } } void getLeastImportantPixels (int *in, int width, int height, int *out){ int lastRow = (height - 1) * width; memcpy(out + lastRow, in + lastRow, width * sizeof(int)); for (int row = height - 2; row >= 0; row--){ int below = row + 1; for (int col = 0; col < width; col++ ){ int idx = row * width + col; int leftCol = max(0, col - 1); int rightCol = min(width - 1, col + 1); int belowIdx = below * width + col; int leftBelowIdx = below * width + leftCol; int rightBelowIdx = below * width + rightCol; out[idx] = min(out[belowIdx], min(out[leftBelowIdx], out[rightBelowIdx])) + in[idx]; } } } void getSeamAt (int *in, int width, int height, int *out, int col){ out[0] = col; for (int row = 1; row < height; row++){ int col = out[row - 1]; int idx = row * width + col; int leftCol = max(0, col - 1); int rightCol = min(width - 1, col + 1); int leftIdx = row * width + leftCol; int rightIdx = row * width + rightCol; if (in[leftIdx] < in[idx]){ if (in[leftIdx] < in[rightIdx]) out[row] = leftCol; else out[row] = rightCol; } else{ if (in[idx] < in[rightIdx]) out[row] = col; else out[row] = rightCol; } } } void getLeastImportantSeam (int *in, int width, int height, int *out){ int minCol = 0; for (int i = 0; i < width; i++){ if (in[i] < in[minCol]) minCol = i; } // printf("min col %d-%d\n", minCol, in[minCol]); getSeamAt(in, width, height, out, minCol); } void removeSeam (const uchar3 *in, int width, int height, uchar3 *out, int *seam){ int newWidth = width - 1; for (int row = 0; row < height; row++){ int col = seam[row]; memcpy(out + row * newWidth, in + row * width, col * sizeof(uchar3)); int nextIdxOut = row * newWidth + col; int nextIdxIn = row * width + col + 1; memcpy(out + nextIdxOut, in + nextIdxIn, (newWidth - col) * sizeof(uchar3)); } } void seamCarvingHost(const uchar3 *in, int width, int height, uchar3 *out, int *xFilter, int *yFilter, int filterWidth){ // convert image to grayscale int *grayScalePixels = (int *)malloc(width * height * sizeof(int)); convertRgb2Gray(in, width * height, grayScalePixels); // edge detection int *pixelsImportance = (int *)malloc(width * height * sizeof(int)); getPixelsImportance(grayScalePixels, width, height, xFilter, yFilter, filterWidth, pixelsImportance); // find the least important seam int *leastPixelsImportance = (int *)malloc(width * height * sizeof(int)); getLeastImportantPixels(pixelsImportance, width, height, leastPixelsImportance); int *leastImportantSeam = (int *)malloc(height * sizeof(int)); getLeastImportantSeam(leastPixelsImportance, width, height, leastImportantSeam); // remove the least important seam removeSeam(in, width, height, out, leastImportantSeam); // free memories free(grayScalePixels); free(pixelsImportance); free(leastPixelsImportance); free(leastImportantSeam); } __global__ void convertRgb2GrayKernel(uchar3 *in, int width, int height, int *out){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (col < width && row < height){ int idx = 
row * width + col; out[idx] = 0.299f * in[idx].x + 0.587f * in[idx].y + 0.114f * in[idx].z; } } __global__ void getPixelsImportanceKernel (int *in, int width, int height, int filterWidth, int *out){ int col = blockDim.x * blockIdx.x + threadIdx.x; int row = blockDim.y * blockIdx.y + threadIdx.y; if (col < width && row < height){ int margin = filterWidth / 2; int curIdx = row * width + col; float xSum = 0, ySum = 0; for (int filterRow = -margin; filterRow <= margin; filterRow++){ for (int filterCol = -margin; filterCol <= margin; filterCol++){ int filterIdx = (filterRow + margin) * filterWidth + filterCol + margin; int dx = min(width - 1, max(0, col + filterCol)); int dy = min(height - 1, max(0, row + filterRow)); int idx = dy * width + dx; xSum += in[idx] * dc_xFilter[filterIdx]; ySum += in[idx] * dc_yFilter[filterIdx]; } } out[curIdx] = abs(xSum) + abs(ySum); } } __global__ void upTriangle (int *in, int width, int height, int yStart, int yStop, int baseWith, int *out){ int xStart = baseWith * blockIdx.x * blockDim.x + threadIdx.x * baseWith; int xStop = xStart + baseWith - 1; for (int y = yStart; y >= yStop; y--){ for (int x = xStart; x <= xStop; x++){ if (x < width){ int idx = y * width + x; int below = (y + 1) * width + x; int left = (y + 1) * width + max(0, x - 1); int right = (y + 1) * width + min(width - 1, x + 1); out[idx] = in[idx] + min(out[below], min(out[left], out[right])); } } xStart += 1; xStop -= 1; } } __global__ void downTriangle (int *in, int width, int height, int yStart, int yStop, int baseWith, int *out){ int xStop = baseWith * (threadIdx.x + blockDim.x * blockIdx.x); int xStart = xStop - 1; for (int y = yStart; y >= yStop; y--){ for (int x = xStart; x <= xStop; x++){ if (x >= 0 && x < width){ int idx = y * width + x; int below = (y + 1) * width + x; int left = (y + 1) * width + max(0, x - 1); int right = (y + 1) * width + min(width - 1, x + 1); out[idx] = in[idx] + min(out[below], min(out[left], out[right])); } } xStart -= 1; xStop += 1; } } __global__ void getMinColSeam (int *in, int width, int *out){ extern __shared__ int s_mem[]; int i = (blockDim.x * blockIdx.x + threadIdx.x) * 2; if (i < width) s_mem[threadIdx.x] = i; if (i + 1 < width) s_mem[threadIdx.x + 1] = i + 1; __syncthreads(); for (int stride = 1; stride < 2 * blockDim.x; stride *= 2){ if (threadIdx.x % stride == 0){ if (i + stride < width){ if (in[s_mem[threadIdx.x]] > in[s_mem[threadIdx.x + stride]]){ s_mem[threadIdx.x] = s_mem[threadIdx.x + stride]; } } } __syncthreads(); } if (threadIdx.x == 0){ out[blockIdx.x] = s_mem[0]; } } void seamCarvingDevice(const uchar3 *in, int width, int height, uchar3 *out, int *xFilter, int *yFilter, int filterWidth, dim3 blockSize, int baseWith){ // prepare some values int lastRowIdx = (height - 1) * width; int stripHeight = baseWith % 2 == 0 ? 
baseWith / 2 + 1 : (baseWith + 1) / 2 + 1; int gridSizeTriangle = (width - 1) / (blockSize.x * baseWith) + 1; int minColGridSize = (width - 1) / (2 * blockSize.x) + 1; size_t dataSize = width * height * sizeof(uchar3); size_t rowSize = width * sizeof(int); size_t grayScaleSize = width * height * sizeof(int); dim3 gridSize((width - 1) / blockSize.x + 1, (height - 1) / blockSize.y + 1); // allocate device memories uchar3 *d_in; int *d_grayScalePixels, *d_pixelsImportance, *d_leastImportantPixels, *d_minCol; CHECK(cudaMalloc(&d_in, dataSize)); CHECK(cudaMalloc(&d_grayScalePixels, grayScaleSize)); CHECK(cudaMalloc(&d_pixelsImportance, grayScaleSize)); CHECK(cudaMalloc(&d_leastImportantPixels, grayScaleSize)); CHECK(cudaMalloc(&d_minCol, minColGridSize * sizeof(int))); // allocate host memories int *leastPixelsImportance = (int *)malloc(grayScaleSize); int *leastImportantSeam = (int *)malloc(height * sizeof(int)); int *minCol = (int *)malloc(minColGridSize * sizeof(int)); // copy data to device memories CHECK(cudaMemcpy(d_in, in, dataSize, cudaMemcpyHostToDevice)); // convert image to grayscale convertRgb2GrayKernel<<<gridSize, blockSize>>>(d_in, width, height, d_grayScalePixels); CHECK(cudaGetLastError()); // edge detection getPixelsImportanceKernel<<<gridSize, blockSize>>>(d_grayScalePixels, width, height, filterWidth, d_pixelsImportance); CHECK(cudaGetLastError()); // find the least important pixels CHECK(cudaMemcpy(d_leastImportantPixels + lastRowIdx, d_pixelsImportance + lastRowIdx, rowSize, cudaMemcpyDeviceToDevice)); for (int y = height - 2; y >= 0; y -= stripHeight){ int yStart = y; int yStop = max(0, yStart - stripHeight + 1); upTriangle<<<gridSizeTriangle, blockSize.x>>>(d_pixelsImportance, width, height, yStart, yStop, baseWith, d_leastImportantPixels); yStart = max(0, yStart - 1); yStop = max(0, yStart - stripHeight + 1); downTriangle<<<gridSizeTriangle + 1, blockSize.x>>>(d_pixelsImportance, width, height, yStart, yStop, baseWith, d_leastImportantPixels); } CHECK(cudaMemcpy(leastPixelsImportance, d_leastImportantPixels, grayScaleSize, cudaMemcpyDeviceToHost)); // find the least important seam getMinColSeam<<<minColGridSize, blockSize.x, blockSize.x * 2 * sizeof(int)>>>(d_leastImportantPixels, width, d_minCol); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(minCol, d_minCol, minColGridSize * sizeof(int), cudaMemcpyDeviceToHost)); int mc = minCol[0]; for (int i = 0; i < minColGridSize; i += 1){ if (leastPixelsImportance[minCol[i]] < leastPixelsImportance[mc]){ mc = minCol[i]; } } getSeamAt(leastPixelsImportance, width, height, leastImportantSeam, mc); // remove the least important seam removeSeam(in, width, height, out, leastImportantSeam); // free memories CHECK(cudaFree(d_in)); CHECK(cudaFree(d_grayScalePixels)); CHECK(cudaFree(d_pixelsImportance)); CHECK(cudaFree(d_leastImportantPixels)); CHECK(cudaFree(d_minCol)); free(leastPixelsImportance); free(leastImportantSeam); free(minCol); } void seamCarving(const uchar3 *in, int width, int height, uchar3 *out, int newWidth, int *xFilter, int *yFilter, int filterWidth, bool usingDevice=false, dim3 blockSize=dim3(1, 1), int baseWith = 0){ if (usingDevice == false){ printf("\nSeam carving by host\n"); } else{ printf("\nSeam carving by device\n"); // copy x filter, y filter on host to dc_x filter, dc_y filter on device size_t filterSize = filterWidth * filterWidth * sizeof(int); CHECK(cudaMemcpyToSymbol(dc_xFilter, xFilter, filterSize)); CHECK(cudaMemcpyToSymbol(dc_yFilter, yFilter, filterSize)); } GpuTimer timer; timer.Start(); // allocate 
host memories uchar3 *src = (uchar3 *)malloc(width * height * sizeof(uchar3)); uchar3 *dst = (uchar3 *)malloc(width * height * sizeof(uchar3)); // store the pointer for freeing uchar3 *originalSrc = src; uchar3 *originalDst = dst; // copy input data to src pointer memcpy(src, in, width * height * sizeof(uchar3)); // do the seam carving by decrease width by 1 until newWidth for (int w = width; w > newWidth; w--){ // resize the dst pointer with current width - 1; dst = (uchar3 *)realloc(dst, (w-1) * height * sizeof(uchar3)); // seamCarving the picture if (usingDevice == false){ seamCarvingHost(src, w, height, dst, xFilter, yFilter, filterWidth); } else{ seamCarvingDevice(src, w, height, dst, xFilter, yFilter, filterWidth, blockSize, baseWith); } // swap src and dst uchar3 * temp = src; src = dst; dst = temp; } // copy the output data to the out pointer memcpy(out, src, newWidth * height * sizeof(uchar3)); // free memories free(originalDst); free(originalSrc); timer.Stop(); printf("Time: %.3f ms\n", timer.Eplapsed()); } float computeError (uchar3 *a1, uchar3* a2, int n){ float err = 0; for (int i = 0; i < n; i++){ err += abs((int)a1[i].x - (int)a2[i].x); err += abs((int)a1[i].y - (int)a2[i].y); err += abs((int)a1[i].z - (int)a2[i].z); } err /= (n * 3); return err; } void printError (uchar3 *a1, uchar3 *a2, int width, int height){ float err = computeError(a1, a2, width * height); printf("Error: %f\n", err); } void printDeviceInfo(int codeVer){ cudaDeviceProp devProv; CHECK(cudaGetDeviceProperties(&devProv, 0)); printf("Vesrion of code: %d\n", codeVer); printf("**********GPU info**********\n"); printf("Name: %s\n", devProv.name); printf("Compute capability: %d.%d\n", devProv.major, devProv.minor); printf("Num SMs: %d\n", devProv.multiProcessorCount); printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor); printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize); printf("GMEM: %lu bytes\n", devProv.totalGlobalMem); printf("CMEM: %lu bytes\n", devProv.totalConstMem); printf("L2 cache: %i bytes\n", devProv.l2CacheSize); printf("SMEM / one SM: %lu bytes\n", devProv.sharedMemPerMultiprocessor); printf("****************************\n"); } char *concatStr(const char *s1, const char *s2){ char *result = (char *)malloc(strlen(s1) + strlen(s2) + 1); strcpy(result, s1); strcat(result, s2); return result; } int main (int argc, char **argv){ if (argc != 4 && argc != 6){ printf("The number of arguments is invalid\n"); return EXIT_FAILURE; } int seamCount = atoi(argv[2]); int baseWith = atoi(argv[3]); // Read input image file int width, height; uchar3 *inPixels; readRGBPnm(argv[1], width, height, inPixels); printf("\nImage size (width * height): %i x %i\n", width, height); int newWidth = width - seamCount; if (newWidth <= 0){ printf("The count of removed seams must be smaller than the width of the image"); return EXIT_FAILURE; } printf("\nNew image size (width * height): %i x %i\n", newWidth, height); // print device info int codeVer = 1; printDeviceInfo(codeVer); // init out pointer uchar3 *correctOutPixels = (uchar3 *)malloc(newWidth * height * sizeof(uchar3)); uchar3 *outPixels = (uchar3 *)malloc(newWidth * height * sizeof(uchar3)); // Set up x sobel filter and y sobel filter int filterWidth = FILTER_WIDTH; int *xFilter = (int *)malloc(filterWidth * filterWidth * sizeof(int)); int *yFilter = (int *)malloc(filterWidth * filterWidth * sizeof(int)); initSobelFilter(xFilter, true); initSobelFilter(yFilter, false); // Seam carving not using device 
    seamCarving(inPixels, width, height, correctOutPixels, newWidth, xFilter, yFilter, filterWidth);

    // get input block size (argc is either 4 or 6 per the check above; argv[3] is
    // baseWith, so the optional block dimensions are argv[4] and argv[5])
    dim3 blockSize(32, 32); // default
    if (argc == 6){
        blockSize.x = atoi(argv[4]);
        blockSize.y = atoi(argv[5]);
    }

    // Seam carving using device
    seamCarving(inPixels, width, height, outPixels, newWidth, xFilter, yFilter, filterWidth, true, blockSize, baseWith);
    printError(correctOutPixels, outPixels, newWidth, height);

    // Write results to files
    char *outFileNameBase = strtok(argv[1], "."); // get rid of extension
    writeRGBPnm(correctOutPixels, newWidth, height, concatStr(outFileNameBase, "_host.pnm"));
    writeRGBPnm(outPixels, newWidth, height, concatStr(outFileNameBase, "_device.pnm"));

    // Free memories
    free(inPixels);
    free(xFilter);
    free(yFilter);
    free(correctOutPixels);
    free(outPixels);

    return EXIT_SUCCESS;
}
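The seam carving program above relies on a CHECK error-handling macro, a GpuTimer class, and the PNM read/write helpers, all of which are defined in an earlier part of the original source and are not reproduced in this excerpt. For readers who want to compile the excerpt on its own, a minimal sketch of an error-checking macro with the shape the code expects follows; the project's actual CHECK may be defined differently.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Minimal sketch (assumption): abort on the first failing CUDA runtime call,
// printing the error string and the source location.
#define CHECK(call)                                                     \
    do {                                                                \
        cudaError_t err_ = (call);                                      \
        if (err_ != cudaSuccess) {                                      \
            fprintf(stderr, "CUDA error: %s at %s:%d\n",                \
                    cudaGetErrorString(err_), __FILE__, __LINE__);      \
            exit(EXIT_FAILURE);                                         \
        }                                                               \
    } while (0)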
61d870f7d9922cc2d1f96df2236a7da5e6160f87.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <hip/hip_runtime.h> #include <gtest/gtest.h> #include <random> #include <vector> #include "dali/core/mm/async_pool.h" #include "dali/core/dev_buffer.h" #include "dali/core/mm/mm_test_utils.h" #include "dali/core/cuda_stream.h" #include "rmm/mr/device/pool_memory_resource.hpp" namespace dali { namespace mm { struct GPUHog { ~GPUHog() { if (mem) { CUDA_DTOR_CALL(hipFree(mem)); mem = nullptr; } } void init() { if (!mem) CUDA_CALL(hipMalloc(&mem, size)); } void run(hipStream_t stream, int count = 1) { for (int i = 0; i < count; i++) { CUDA_CALL(hipMemsetAsync(mem, i+1, size, stream)); } } uint8_t *mem = nullptr; size_t size = 16<<20; }; TEST(MMAsyncPool, SingleStreamReuse) { GPUHog hog; hog.init(); HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true); test::test_device_resource upstream; async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream); stream_view sv(stream); int size1 = 1<<20; void *ptr = pool.allocate_async(size1, sv); hog.run(stream, 2); pool.deallocate_async(ptr, size1, sv); void *p2 = pool.allocate_async(size1, sv); CUDA_CALL(hipStreamSynchronize(stream)); EXPECT_EQ(ptr, p2); } TEST(MMAsyncPool, TwoStream) { mm::test::test_device_resource upstream; HIPStreamMasqueradingAsCUDA s1 = HIPStreamMasqueradingAsCUDA::Create(true); HIPStreamMasqueradingAsCUDA s2 = HIPStreamMasqueradingAsCUDA::Create(true); stream_view sv1(s1); stream_view sv2(s2); GPUHog hog; hog.init(); const int min_success = 10; const int max_not_busy = 100; int stream_not_busy = 0; int success = 0; while (success < min_success) { async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream); void *p1 = pool.allocate_async(1000, sv1); hog.run(s1); pool.deallocate_async(p1, 1000, sv1); void *p2 = pool.allocate_async(1000, sv2); void *p3 = pool.allocate_async(1000, sv1); hipError_t e = hipStreamQuery(s1); if (e != hipErrorNotReady) { std::cerr << "Stream s1 finished before attempt to allocate on s2 was made - retrying\n"; CUDA_CALL(hipGetLastError()); if (++stream_not_busy > max_not_busy) { FAIL() << "Stream s1 finished - test unreliable."; } continue; } stream_not_busy = 0; ASSERT_NE(p1, p2); ASSERT_EQ(p1, p3); CUDA_CALL(hipStreamSynchronize(s1)); success++; CUDA_CALL(hipStreamSynchronize(s2)); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } namespace { __global__ void Check(const void *ptr, size_t size, uint8_t fill, int *failures) { size_t idx = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (idx < size) { if (static_cast<const uint8_t*>(ptr)[idx] != fill) atomicAdd(failures, 1); } } struct block { void *ptr; size_t size; uint8_t fill; hipStream_t stream; }; template <typename Pool, typename Mutex> void AsyncPoolTest(Pool &pool, 
vector<block> &blocks, Mutex &mtx, HIPStreamMasqueradingAsCUDA &stream, int max_iters = 20000, bool use_hog = false) { stream_view sv(stream); std::mt19937_64 rng(12345); std::poisson_distribution<> size_dist(1024); const int max_size = 1 << 20; std::uniform_int_distribution<> sync_dist(10, 10); std::bernoulli_distribution action_dist; std::bernoulli_distribution hog_dist(0.05f); std::uniform_int_distribution<> fill_dist(1, 255); DeviceBuffer<int> failure_buf; int failures = 0; failure_buf.from_host(&failures, 1, stream); GPUHog hog; if (use_hog) hog.init(); int hogs = 0; int max_hogs = sync_dist(rng); CUDAEvent event = CUDAEvent::Create(); for (int i = 0; i < max_iters; i++) { if (use_hog && hog_dist(rng)) { if (hogs++ > max_hogs) { CUDA_CALL(hipStreamSynchronize(stream)); max_hogs = sync_dist(rng); } hog.run(stream); } if (action_dist(rng) || blocks.empty()) { size_t size; do { size = size_dist(rng); } while (size > max_size); uint8_t fill = fill_dist(rng); void *ptr = stream ? pool.allocate_async(size, sv) : pool.allocate(size); CUDA_CALL(hipMemsetAsync(ptr, fill, size, stream)); { std::lock_guard<Mutex> guard(mtx); blocks.push_back({ ptr, size, fill, stream }); } } else { block blk; { std::lock_guard<Mutex> guard(mtx); if (blocks.empty()) continue; int i = std::uniform_int_distribution<>(0, blocks.size()-1)(rng); std::swap(blocks[i], blocks.back()); blk = blocks.back(); blocks.pop_back(); } if (blk.stream != stream) { if (stream) { CUDA_CALL(hipEventRecord(event, blk.stream)); CUDA_CALL(hipStreamWaitEvent(stream, event, 0)); } else { CUDA_CALL(hipStreamSynchronize(blk.stream)); } } hipLaunchKernelGGL(( Check), dim3(div_ceil(blk.size, 1024)), dim3(1024), 0, stream, blk.ptr, blk.size, blk.fill, failure_buf); if (stream) { pool.deallocate_async(blk.ptr, blk.size, sv); } else { CUDA_CALL(hipStreamSynchronize(stream)); pool.deallocate(blk.ptr, blk.size); } } } copyD2H<int>(&failures, failure_buf, 1, stream); CUDA_CALL(hipStreamSynchronize(stream)); ASSERT_EQ(failures, 0); } } // namespace TEST(MMAsyncPool, SingleStreamRandom) { HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true); test::test_device_resource upstream; { async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream); } CUDA_CALL(hipStreamSynchronize(stream)); std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, MultiThreadedSingleStreamRandom) { HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true); mm::test::test_device_resource upstream; { vector<block> blocks; std::mutex mtx; async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream); vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { AsyncPoolTest(pool, blocks, mtx, stream); })); } for (auto &t : threads) t.join(); } CUDA_CALL(hipStreamSynchronize(stream)); std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, MultiThreadedMultiStreamRandom) { mm::test::test_device_resource upstream; { async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream); vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { 
HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream); CUDA_CALL(hipStreamSynchronize(stream)); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, MultiStreamRandomWithGPUHogs) { mm::test::test_device_resource upstream; { async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream, false); vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream, 20000, true); CUDA_CALL(hipStreamSynchronize(stream)); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, CrossStream) { mm::test::test_device_resource upstream; { async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream, false); vector<std::thread> threads; vector<HIPStreamMasqueradingAsCUDA> streams; vector<block> blocks; std::mutex mtx; const int N = 10; streams.resize(N); for (int t = 0; t < N; t++) { if (t != 0) // keep empty stream at index 0 to mix sync/async allocations streams[t] = HIPStreamMasqueradingAsCUDA::Create(true); threads.push_back(std::thread([&, t]() { AsyncPoolTest(pool, blocks, mtx, streams[t]); CUDA_CALL(hipStreamSynchronize(streams[t])); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, CrossStreamWithHogs) { mm::test::test_device_resource upstream; { async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream); vector<std::thread> threads; vector<HIPStreamMasqueradingAsCUDA> streams; vector<block> blocks; std::mutex mtx; const int N = 10; streams.resize(N); for (int t = 0; t < N; t++) { if (t != 0) // keep empty stream at index 0 to mix sync/async allocations streams[t] = HIPStreamMasqueradingAsCUDA::Create(true); threads.push_back(std::thread([&, t]() { AsyncPoolTest(pool, blocks, mtx, streams[t], 10000, true); CUDA_CALL(hipStreamSynchronize(streams[t])); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } } // namespace mm } // namespace dali
61d870f7d9922cc2d1f96df2236a7da5e6160f87.cu
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cuda_runtime.h> #include <gtest/gtest.h> #include <random> #include <vector> #include "dali/core/mm/async_pool.h" #include "dali/core/dev_buffer.h" #include "dali/core/mm/mm_test_utils.h" #include "dali/core/cuda_stream.h" #include "rmm/mr/device/pool_memory_resource.hpp" namespace dali { namespace mm { struct GPUHog { ~GPUHog() { if (mem) { CUDA_DTOR_CALL(cudaFree(mem)); mem = nullptr; } } void init() { if (!mem) CUDA_CALL(cudaMalloc(&mem, size)); } void run(cudaStream_t stream, int count = 1) { for (int i = 0; i < count; i++) { CUDA_CALL(cudaMemsetAsync(mem, i+1, size, stream)); } } uint8_t *mem = nullptr; size_t size = 16<<20; }; TEST(MMAsyncPool, SingleStreamReuse) { GPUHog hog; hog.init(); CUDAStream stream = CUDAStream::Create(true); test::test_device_resource upstream; async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream); stream_view sv(stream); int size1 = 1<<20; void *ptr = pool.allocate_async(size1, sv); hog.run(stream, 2); pool.deallocate_async(ptr, size1, sv); void *p2 = pool.allocate_async(size1, sv); CUDA_CALL(cudaStreamSynchronize(stream)); EXPECT_EQ(ptr, p2); } TEST(MMAsyncPool, TwoStream) { mm::test::test_device_resource upstream; CUDAStream s1 = CUDAStream::Create(true); CUDAStream s2 = CUDAStream::Create(true); stream_view sv1(s1); stream_view sv2(s2); GPUHog hog; hog.init(); const int min_success = 10; const int max_not_busy = 100; int stream_not_busy = 0; int success = 0; while (success < min_success) { async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream); void *p1 = pool.allocate_async(1000, sv1); hog.run(s1); pool.deallocate_async(p1, 1000, sv1); void *p2 = pool.allocate_async(1000, sv2); void *p3 = pool.allocate_async(1000, sv1); cudaError_t e = cudaStreamQuery(s1); if (e != cudaErrorNotReady) { std::cerr << "Stream s1 finished before attempt to allocate on s2 was made - retrying\n"; CUDA_CALL(cudaGetLastError()); if (++stream_not_busy > max_not_busy) { FAIL() << "Stream s1 finished - test unreliable."; } continue; } stream_not_busy = 0; ASSERT_NE(p1, p2); ASSERT_EQ(p1, p3); CUDA_CALL(cudaStreamSynchronize(s1)); success++; CUDA_CALL(cudaStreamSynchronize(s2)); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } namespace { __global__ void Check(const void *ptr, size_t size, uint8_t fill, int *failures) { size_t idx = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (idx < size) { if (static_cast<const uint8_t*>(ptr)[idx] != fill) atomicAdd(failures, 1); } } struct block { void *ptr; size_t size; uint8_t fill; cudaStream_t stream; }; template <typename Pool, typename Mutex> void AsyncPoolTest(Pool &pool, vector<block> &blocks, Mutex &mtx, CUDAStream &stream, int max_iters = 20000, bool use_hog = false) { stream_view sv(stream); std::mt19937_64 rng(12345); 
std::poisson_distribution<> size_dist(1024); const int max_size = 1 << 20; std::uniform_int_distribution<> sync_dist(10, 10); std::bernoulli_distribution action_dist; std::bernoulli_distribution hog_dist(0.05f); std::uniform_int_distribution<> fill_dist(1, 255); DeviceBuffer<int> failure_buf; int failures = 0; failure_buf.from_host(&failures, 1, stream); GPUHog hog; if (use_hog) hog.init(); int hogs = 0; int max_hogs = sync_dist(rng); CUDAEvent event = CUDAEvent::Create(); for (int i = 0; i < max_iters; i++) { if (use_hog && hog_dist(rng)) { if (hogs++ > max_hogs) { CUDA_CALL(cudaStreamSynchronize(stream)); max_hogs = sync_dist(rng); } hog.run(stream); } if (action_dist(rng) || blocks.empty()) { size_t size; do { size = size_dist(rng); } while (size > max_size); uint8_t fill = fill_dist(rng); void *ptr = stream ? pool.allocate_async(size, sv) : pool.allocate(size); CUDA_CALL(cudaMemsetAsync(ptr, fill, size, stream)); { std::lock_guard<Mutex> guard(mtx); blocks.push_back({ ptr, size, fill, stream }); } } else { block blk; { std::lock_guard<Mutex> guard(mtx); if (blocks.empty()) continue; int i = std::uniform_int_distribution<>(0, blocks.size()-1)(rng); std::swap(blocks[i], blocks.back()); blk = blocks.back(); blocks.pop_back(); } if (blk.stream != stream) { if (stream) { CUDA_CALL(cudaEventRecord(event, blk.stream)); CUDA_CALL(cudaStreamWaitEvent(stream, event, 0)); } else { CUDA_CALL(cudaStreamSynchronize(blk.stream)); } } Check<<<div_ceil(blk.size, 1024), 1024, 0, stream>>>( blk.ptr, blk.size, blk.fill, failure_buf); if (stream) { pool.deallocate_async(blk.ptr, blk.size, sv); } else { CUDA_CALL(cudaStreamSynchronize(stream)); pool.deallocate(blk.ptr, blk.size); } } } copyD2H<int>(&failures, failure_buf, 1, stream); CUDA_CALL(cudaStreamSynchronize(stream)); ASSERT_EQ(failures, 0); } } // namespace TEST(MMAsyncPool, SingleStreamRandom) { CUDAStream stream = CUDAStream::Create(true); test::test_device_resource upstream; { async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream); } CUDA_CALL(cudaStreamSynchronize(stream)); std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, MultiThreadedSingleStreamRandom) { CUDAStream stream = CUDAStream::Create(true); mm::test::test_device_resource upstream; { vector<block> blocks; std::mutex mtx; async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream); vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { AsyncPoolTest(pool, blocks, mtx, stream); })); } for (auto &t : threads) t.join(); } CUDA_CALL(cudaStreamSynchronize(stream)); std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, MultiThreadedMultiStreamRandom) { mm::test::test_device_resource upstream; { async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream); vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { CUDAStream stream = CUDAStream::Create(true); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream); CUDA_CALL(cudaStreamSynchronize(stream)); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << 
upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, MultiStreamRandomWithGPUHogs) { mm::test::test_device_resource upstream; { async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream, false); vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { CUDAStream stream = CUDAStream::Create(true); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream, 20000, true); CUDA_CALL(cudaStreamSynchronize(stream)); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, CrossStream) { mm::test::test_device_resource upstream; { async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream, false); vector<std::thread> threads; vector<CUDAStream> streams; vector<block> blocks; std::mutex mtx; const int N = 10; streams.resize(N); for (int t = 0; t < N; t++) { if (t != 0) // keep empty stream at index 0 to mix sync/async allocations streams[t] = CUDAStream::Create(true); threads.push_back(std::thread([&, t]() { AsyncPoolTest(pool, blocks, mtx, streams[t]); CUDA_CALL(cudaStreamSynchronize(streams[t])); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, CrossStreamWithHogs) { mm::test::test_device_resource upstream; { async_pool_base<memory_kind::device, free_tree, std::mutex> pool(&upstream); vector<std::thread> threads; vector<CUDAStream> streams; vector<block> blocks; std::mutex mtx; const int N = 10; streams.resize(N); for (int t = 0; t < N; t++) { if (t != 0) // keep empty stream at index 0 to mix sync/async allocations streams[t] = CUDAStream::Create(true); threads.push_back(std::thread([&, t]() { AsyncPoolTest(pool, blocks, mtx, streams[t], 10000, true); CUDA_CALL(cudaStreamSynchronize(streams[t])); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } } // namespace mm } // namespace dali
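The AsyncPoolTest helper above launches the Check kernel with div_ceil(blk.size, 1024) blocks. div_ceil comes from DALI's core utilities and is not shown in this excerpt; it is the usual ceiling-division idiom, roughly as sketched below (DALI's own declaration may differ).

// Sketch of the assumed helper: smallest k with k * b >= a, for positive a and b.
template <typename A, typename B>
constexpr auto div_ceil(A a, B b) -> decltype(a / b) {
    return (a + b - 1) / b;
}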
a93eb71dcf87b9856328bd1c04c9d96c33e79641.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void query_ball_point_gpu(int b, int n, int m, const float *radius, int nsample, const float *xyz1, const float *xyz2, int *idx) { int index = threadIdx.x; xyz1 += n*3*index; xyz2 += m*3*index; idx += m*nsample*index; for (int j=0;j<m;++j) { int cnt = 0; for (int k=0;k<n;++k) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball float x2=xyz2[j*3+0]; float y2=xyz2[j*3+1]; float z2=xyz2[j*3+2]; float x1=xyz1[k*3+0]; float y1=xyz1[k*3+1]; float z1=xyz1[k*3+2]; float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f); if (d<radius[0]) { if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices for (int l=0;l<nsample;++l) idx[j*nsample+l] = k; } idx[j*nsample+cnt] = k; cnt+=1; } } } }
a93eb71dcf87b9856328bd1c04c9d96c33e79641.cu
#include "includes.h" __global__ void query_ball_point_gpu(int b, int n, int m, const float *radius, int nsample, const float *xyz1, const float *xyz2, int *idx) { int index = threadIdx.x; xyz1 += n*3*index; xyz2 += m*3*index; idx += m*nsample*index; for (int j=0;j<m;++j) { int cnt = 0; for (int k=0;k<n;++k) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball float x2=xyz2[j*3+0]; float y2=xyz2[j*3+1]; float z2=xyz2[j*3+2]; float x1=xyz1[k*3+0]; float y1=xyz1[k*3+1]; float z1=xyz1[k*3+2]; float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f); if (d<radius[0]) { if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices for (int l=0;l<nsample;++l) idx[j*nsample+l] = k; } idx[j*nsample+cnt] = k; cnt+=1; } } } }
ff7b26cd2f09fd063012315d228690e42501b8c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file display.cu * \brief Contain classes to color encode Optical flow fields. * \copyright 2015, Juan David Adarve, ANU. See AUTHORS for more details * \license 3-clause BSD, see LICENSE for more details */ #include <exception> #include <iostream> #include "flowfilter/image.h" #include "flowfilter/colorwheel.h" #include "flowfilter/gpu/util.h" #include "flowfilter/gpu/display.h" #include "flowfilter/gpu/device/display_k.h" namespace flowfilter { namespace gpu { FlowToColor::FlowToColor() : Stage() { __configured = false; __inputFlowSet = false; __maxflow = 1.0f; } FlowToColor::FlowToColor(flowfilter::gpu::GPUImage inputFlow, const float maxflow) : Stage() { __configured = false; __inputFlowSet = false; setInputFlow(inputFlow); setMaxFlow(maxflow); configure(); } FlowToColor::~FlowToColor() { // nothing to do } void FlowToColor::configure() { if(!__inputFlowSet) { std::cerr << "ERROR: FlowToColor::configure(): input flow not set" << std::endl; throw std::exception(); } // creates an RGBA images from the RGB color wheel image_t wheelRGBA = getColorWheelRGBA(); __colorWheel = GPUImage(wheelRGBA.height, wheelRGBA.width, wheelRGBA.depth, sizeof(unsigned char)); // upload RGBA color to device __colorWheel.upload(wheelRGBA); // configure texture to read uchar4 with normalized coordinates __colorWheelTexture = GPUTexture(__colorWheel, hipChannelFormatKindUnsigned, hipReadModeElementType, true); // output coloured optical flow __colorFlow = GPUImage(__inputFlow.height(), __inputFlow.width(), 4, sizeof(unsigned char)); // configure block and grid sizes __block = dim3(32, 32, 1); configureKernelGrid(__inputFlow.height(), __inputFlow.width(), __block, __grid); __configured = true; } void FlowToColor::compute() { startTiming(); if(!__configured) { std::cerr << "ERROR: FlowToColor::compute(): Stage not configured" << std::endl; throw std::exception(); } hipLaunchKernelGGL(( flowToColor_k), dim3(__grid), dim3(__block), 0, __stream, __inputFlow.wrap<float2>(), __colorWheelTexture.getTextureObject(), __maxflow, __colorFlow.wrap<uchar4>() ); stopTiming(); } void FlowToColor::setInputFlow(GPUImage inputFlow) { if(inputFlow.depth() != 2) { std::cerr << "ERROR: FlowToColor::setInputFlow(): input flow should have depth 2: " << inputFlow.depth() << std::endl; throw std::exception(); } if(inputFlow.itemSize() != 4) { std::cerr << "ERROR: FlowToColor::setInputFlow(): input flow should have item size 4: " << inputFlow.itemSize() << std::endl; throw std::exception(); } __inputFlow = inputFlow; __inputFlowSet = true; } GPUImage FlowToColor::getColorFlow() { return __colorFlow; } float FlowToColor::getMaxFlow() const { return __maxflow; } void FlowToColor::setMaxFlow(const float maxflow) { if(maxflow <= 0.0f) { std::cerr << "ERROR: FlowToColor::setMaxFlow(): maxflow should be greater than 0.0: " << maxflow << std::endl; throw std::exception(); } __maxflow = maxflow; } void FlowToColor::downloadColorFlow(flowfilter::image_t& colorFlow) { __colorFlow.download(colorFlow); } }; // namespace gpu }; // namespace flowfilter
ff7b26cd2f09fd063012315d228690e42501b8c5.cu
/** * \file display.cu * \brief Contain classes to color encode Optical flow fields. * \copyright 2015, Juan David Adarve, ANU. See AUTHORS for more details * \license 3-clause BSD, see LICENSE for more details */ #include <exception> #include <iostream> #include "flowfilter/image.h" #include "flowfilter/colorwheel.h" #include "flowfilter/gpu/util.h" #include "flowfilter/gpu/display.h" #include "flowfilter/gpu/device/display_k.h" namespace flowfilter { namespace gpu { FlowToColor::FlowToColor() : Stage() { __configured = false; __inputFlowSet = false; __maxflow = 1.0f; } FlowToColor::FlowToColor(flowfilter::gpu::GPUImage inputFlow, const float maxflow) : Stage() { __configured = false; __inputFlowSet = false; setInputFlow(inputFlow); setMaxFlow(maxflow); configure(); } FlowToColor::~FlowToColor() { // nothing to do } void FlowToColor::configure() { if(!__inputFlowSet) { std::cerr << "ERROR: FlowToColor::configure(): input flow not set" << std::endl; throw std::exception(); } // creates an RGBA images from the RGB color wheel image_t wheelRGBA = getColorWheelRGBA(); __colorWheel = GPUImage(wheelRGBA.height, wheelRGBA.width, wheelRGBA.depth, sizeof(unsigned char)); // upload RGBA color to device __colorWheel.upload(wheelRGBA); // configure texture to read uchar4 with normalized coordinates __colorWheelTexture = GPUTexture(__colorWheel, cudaChannelFormatKindUnsigned, cudaReadModeElementType, true); // output coloured optical flow __colorFlow = GPUImage(__inputFlow.height(), __inputFlow.width(), 4, sizeof(unsigned char)); // configure block and grid sizes __block = dim3(32, 32, 1); configureKernelGrid(__inputFlow.height(), __inputFlow.width(), __block, __grid); __configured = true; } void FlowToColor::compute() { startTiming(); if(!__configured) { std::cerr << "ERROR: FlowToColor::compute(): Stage not configured" << std::endl; throw std::exception(); } flowToColor_k<<<__grid, __block, 0, __stream>>>( __inputFlow.wrap<float2>(), __colorWheelTexture.getTextureObject(), __maxflow, __colorFlow.wrap<uchar4>() ); stopTiming(); } void FlowToColor::setInputFlow(GPUImage inputFlow) { if(inputFlow.depth() != 2) { std::cerr << "ERROR: FlowToColor::setInputFlow(): input flow should have depth 2: " << inputFlow.depth() << std::endl; throw std::exception(); } if(inputFlow.itemSize() != 4) { std::cerr << "ERROR: FlowToColor::setInputFlow(): input flow should have item size 4: " << inputFlow.itemSize() << std::endl; throw std::exception(); } __inputFlow = inputFlow; __inputFlowSet = true; } GPUImage FlowToColor::getColorFlow() { return __colorFlow; } float FlowToColor::getMaxFlow() const { return __maxflow; } void FlowToColor::setMaxFlow(const float maxflow) { if(maxflow <= 0.0f) { std::cerr << "ERROR: FlowToColor::setMaxFlow(): maxflow should be greater than 0.0: " << maxflow << std::endl; throw std::exception(); } __maxflow = maxflow; } void FlowToColor::downloadColorFlow(flowfilter::image_t& colorFlow) { __colorFlow.download(colorFlow); } }; // namespace gpu }; // namespace flowfilter
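FlowToColor is driven by handing it a two-channel float GPUImage and a saturation value, then calling compute() once per frame and downloading the RGBA result. A usage sketch under those assumptions follows; allocation of the host image_t and of the input flow field is elided, since those helpers live elsewhere in flowfilter, and the wrapper function name is hypothetical.

#include "flowfilter/image.h"
#include "flowfilter/gpu/display.h"

// Usage sketch (assumption): `flow` already holds a 2-channel float32 flow field
// and `colorRGBA` is an allocated host image of size height x width x 4 uchar.
void colorizeFlow(flowfilter::gpu::GPUImage &flow, flowfilter::image_t &colorRGBA) {
    flowfilter::gpu::FlowToColor flowColor(flow, 4.0f);  // maxflow = 4 pixels/frame
    flowColor.compute();                                 // run the colour-encoding kernel
    flowColor.downloadColorFlow(colorRGBA);              // copy the RGBA result to host
}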
984c68da118952f012c2ae20a71ca5ffc7e9f00b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <stdlib.h> #include <vector> #include <algorithm> #include <arrayfire.h> #include <hip/hip_complex.h> #include <cusp/complex.h> #include <hip/hip_runtime.h> #include "cuda_sample.h" #include <Python.h> #include <numpy/arrayobject.h> #include "pyGnufft.h" #include "afnumpyapi.h" //typedef cusp::complex<float> complex32_t; typedef hipComplex complex32_t; __global__ void makeComplex(float * real, float * imag, int npts, complex32_t * cplx){ int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < npts ) //cplx[i] = complex32_t(real[i], imag[i]); cplx[i] = make_cuComplex(real[i], imag[i]); } PyObject *cDebug(PyObject *self, PyObject *prhs) { PyObject *in0, *in1; if (!(PyArg_ParseTuple(prhs, "OO", &in0, &in1))){ return NULL; } // data POINTERS float * d_real = (float *) PyAfnumpy_DevicePtr(in0); float * d_imag = (float *) PyAfnumpy_DevicePtr(in1); int npoints = PyAfnumpy_Size(in0); complex32_t * d_cplx; hipMalloc(&d_cplx, npoints * sizeof(complex32_t)); dim3 threads(256,1,1); int t1 = npoints / threads.x + 1; dim3 blocks(t1,1,1); hipLaunchKernelGGL(( makeComplex), dim3(blocks), dim3(threads) , 0, 0, d_real, d_imag, npoints, d_cplx); // build int nd = 2; int gdims[] = {512, 1}; af::cfloat * buf = reinterpret_cast<af::cfloat *>(d_cplx); PyObject * out = PyAfnumpy_FromData(nd, gdims, CMPLX32, buf, true); return out; }
984c68da118952f012c2ae20a71ca5ffc7e9f00b.cu
#include <stdlib.h> #include <stdio.h> #include <stdlib.h> #include <vector> #include <algorithm> #include <arrayfire.h> #include <cuComplex.h> #include <cusp/complex.h> #include <cuda.h> #include "cuda_sample.h" #include <Python.h> #include <numpy/arrayobject.h> #include "pyGnufft.h" #include "afnumpyapi.h" //typedef cusp::complex<float> complex32_t; typedef cuComplex complex32_t; __global__ void makeComplex(float * real, float * imag, int npts, complex32_t * cplx){ int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < npts ) //cplx[i] = complex32_t(real[i], imag[i]); cplx[i] = make_cuComplex(real[i], imag[i]); } PyObject *cDebug(PyObject *self, PyObject *prhs) { PyObject *in0, *in1; if (!(PyArg_ParseTuple(prhs, "OO", &in0, &in1))){ return NULL; } // data POINTERS float * d_real = (float *) PyAfnumpy_DevicePtr(in0); float * d_imag = (float *) PyAfnumpy_DevicePtr(in1); int npoints = PyAfnumpy_Size(in0); complex32_t * d_cplx; cudaMalloc(&d_cplx, npoints * sizeof(complex32_t)); dim3 threads(256,1,1); int t1 = npoints / threads.x + 1; dim3 blocks(t1,1,1); makeComplex<<< blocks, threads >>> (d_real, d_imag, npoints, d_cplx); // build int nd = 2; int gdims[] = {512, 1}; af::cfloat * buf = reinterpret_cast<af::cfloat *>(d_cplx); PyObject * out = PyAfnumpy_FromData(nd, gdims, CMPLX32, buf, true); return out; }
f75633afa8fc6fb8047cda3c7f4a8716dc4963d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //****************************************************************************** // // File: ModCubRoot.cu // // Version: 1.0 //****************************************************************************** // Number of threads per block. #define NT 1024 // Overall counter variable in global memory. __device__ unsigned long long int count; __device__ unsigned long long int arraySize = 3; /** * Device kernel to compute modular cube root. * * * @author Omkar Kakade */ extern "C" __global__ void computeModularCubeRoot ( unsigned long long int c, unsigned long long int N, unsigned long long int *final_M) { unsigned long long int thr, size, rank; unsigned long long int local_c; unsigned long long int local_m; unsigned long long int increment; // Determine number of threads and this thread's rank. thr = threadIdx.x; size = gridDim.x*NT; rank = blockIdx.x*NT + thr; // Initialize per-thread. local_c = 0; local_m = 0; increment = 1; unsigned long long int atom_result =0; // Compute modular cube roots. for (unsigned long long int i = rank; i < N; i += size) { unsigned long long int first_mod = (i)%N; unsigned long long int second_mod = (first_mod * i)%N; unsigned long long int third_mod = (second_mod * i)%N; local_c = third_mod; local_m = i; if (local_c == c){ // atomic counter value updation. atom_result = atomicAdd(&count,increment); if (atom_result < arraySize) { final_M[atom_result]=local_m; } } } }
f75633afa8fc6fb8047cda3c7f4a8716dc4963d8.cu
//****************************************************************************** // // File: ModCubRoot.cu // // Version: 1.0 //****************************************************************************** // Number of threads per block. #define NT 1024 // Overall counter variable in global memory. __device__ unsigned long long int count; __device__ unsigned long long int arraySize = 3; /** * Device kernel to compute modular cube root. * * * @author Omkar Kakade */ extern "C" __global__ void computeModularCubeRoot ( unsigned long long int c, unsigned long long int N, unsigned long long int *final_M) { unsigned long long int thr, size, rank; unsigned long long int local_c; unsigned long long int local_m; unsigned long long int increment; // Determine number of threads and this thread's rank. thr = threadIdx.x; size = gridDim.x*NT; rank = blockIdx.x*NT + thr; // Initialize per-thread. local_c = 0; local_m = 0; increment = 1; unsigned long long int atom_result =0; // Compute modular cube roots. for (unsigned long long int i = rank; i < N; i += size) { unsigned long long int first_mod = (i)%N; unsigned long long int second_mod = (first_mod * i)%N; unsigned long long int third_mod = (second_mod * i)%N; local_c = third_mod; local_m = i; if (local_c == c){ // atomic counter value updation. atom_result = atomicAdd(&count,increment); if (atom_result < arraySize) { final_M[atom_result]=local_m; } } } }
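The kernel above reports results through the __device__ globals count and final_M, so a host driver resets count with cudaMemcpyToSymbol before the launch and reads it back with cudaMemcpyFromSymbol afterwards. The original host program is not included in this excerpt; a minimal sketch, assuming it is appended to the same .cu file (error checking omitted for brevity), could look like this.

#include <cstdio>

// Host-side sketch: solve m^3 = c (mod N) for a small sample instance and print
// whatever roots the kernel recorded (at most 3, matching arraySize above).
int main() {
    const unsigned long long int c = 8, N = 29;          // 2^3 = 8 (mod 29), so m = 2 is a root
    unsigned long long int zero = 0;
    cudaMemcpyToSymbol(count, &zero, sizeof(zero));      // reset the device counter

    unsigned long long int *d_roots;
    cudaMalloc(&d_roots, 3 * sizeof(unsigned long long int));

    computeModularCubeRoot<<<1, NT>>>(c, N, d_roots);    // kernel grid-strides over 0..N-1
    cudaDeviceSynchronize();

    unsigned long long int found = 0, roots[3];
    cudaMemcpyFromSymbol(&found, count, sizeof(found));
    cudaMemcpy(roots, d_roots, sizeof(roots), cudaMemcpyDeviceToHost);
    for (unsigned long long int i = 0; i < found && i < 3; i++)
        printf("cube root: %llu\n", roots[i]);

    cudaFree(d_roots);
    return 0;
}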
313d8e97960e8208833180cf9c890fa4f186ebee.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "LinearAlgebraStructs.h"

__global__ void squareElements_kernel(matrix a_d)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int columnIndex = i / a_d.rows;
    int rowIndex = i % a_d.rows;
    // make sure not to write outside of the matrix, in case the number of
    // elements is not a multiple of 1024
    if (columnIndex < a_d.columns) {
        double value = a_d.elements[MATRIX_INDEX(rowIndex, columnIndex, a_d)];
        a_d.elements[MATRIX_INDEX(rowIndex, columnIndex, a_d)] = value * value;
    }
}

/**
 * Squares every element of the given matrix on the device, in place.
 **/
extern "C" void squareElements_d(matrix a_d){
    int N = a_d.rows * a_d.columns;
    // one thread per element of the matrix
    hipLaunchKernelGGL((squareElements_kernel), dim3((N + 1023) / 1024), dim3(1024), 0, 0, a_d);
}
313d8e97960e8208833180cf9c890fa4f186ebee.cu
#include "LinearAlgebraStructs.h" __global__ void squareElements_kernel(matrix a_d) { int i = blockIdx.x*blockDim.x + threadIdx.x; int columnIndex = i / a_d.rows; int rowIndex = i % a_d.rows; if (columnIndex < a_d.columns) {//make sure not to write outside of matrix, incase the number of elements did not have a base of 1024 double value = a_d.elements[MATRIX_INDEX(rowIndex, columnIndex, a_d)]; a_d.elements[MATRIX_INDEX(rowIndex,columnIndex,a_d)] = value * value; } } /** *creates a projection matrix on the gpu to the given matrix on the device. **/ extern "C" void squareElements_d(matrix a_d){ int N = a_d.rows*a_d.columns; //each thread handles two elements of the matrix squareElements_kernel<<<(N+1023)/ 1024, 1024>>>(a_d); }
037454c053bd3d7dff451717c440c324175132b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_ideal_gas_kernel; int xdim0_ideal_gas_kernel_h = -1; __constant__ int ydim0_ideal_gas_kernel; int ydim0_ideal_gas_kernel_h = -1; __constant__ int xdim1_ideal_gas_kernel; int xdim1_ideal_gas_kernel_h = -1; __constant__ int ydim1_ideal_gas_kernel; int ydim1_ideal_gas_kernel_h = -1; __constant__ int xdim2_ideal_gas_kernel; int xdim2_ideal_gas_kernel_h = -1; __constant__ int ydim2_ideal_gas_kernel; int ydim2_ideal_gas_kernel_h = -1; __constant__ int xdim3_ideal_gas_kernel; int xdim3_ideal_gas_kernel_h = -1; __constant__ int ydim3_ideal_gas_kernel; int ydim3_ideal_gas_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x,y,z) (x+xdim0_ideal_gas_kernel*(y)+xdim0_ideal_gas_kernel*ydim0_ideal_gas_kernel*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_ideal_gas_kernel*(y)+xdim1_ideal_gas_kernel*ydim1_ideal_gas_kernel*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_ideal_gas_kernel*(y)+xdim2_ideal_gas_kernel*ydim2_ideal_gas_kernel*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_ideal_gas_kernel*(y)+xdim3_ideal_gas_kernel*ydim3_ideal_gas_kernel*(z)) //user function __device__ void ideal_gas_kernel_gpu( const double *density, const double *energy, double *pressure, double *soundspeed) { double sound_speed_squared, v, pressurebyenergy, pressurebyvolume; v = 1.0 / density[OPS_ACC0(0,0,0)]; pressure[OPS_ACC2(0,0,0)] = (1.4 - 1.0) * density[OPS_ACC0(0,0,0)] * energy[OPS_ACC1(0,0,0)]; pressurebyenergy = (1.4 - 1.0) * density[OPS_ACC0(0,0,0)]; pressurebyvolume = -1.0*density[OPS_ACC0(0,0,0)] * pressure[OPS_ACC2(0,0,0)]; sound_speed_squared = v*v*(pressure[OPS_ACC2(0,0,0)] * pressurebyenergy-pressurebyvolume); soundspeed[OPS_ACC3(0,0,0)] = sqrt(sound_speed_squared); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_ideal_gas_kernel( const double* __restrict arg0, const double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_ideal_gas_kernel + idx_z * 1*1 * xdim0_ideal_gas_kernel * ydim0_ideal_gas_kernel; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_ideal_gas_kernel + idx_z * 1*1 * xdim1_ideal_gas_kernel * ydim1_ideal_gas_kernel; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_ideal_gas_kernel + idx_z * 1*1 * xdim2_ideal_gas_kernel * ydim2_ideal_gas_kernel; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_ideal_gas_kernel + idx_z * 1*1 * xdim3_ideal_gas_kernel * ydim3_ideal_gas_kernel; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ideal_gas_kernel_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_ideal_gas_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,10)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(10,"ideal_gas_kernel"); OPS_kernels[10].count++; 
ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_ideal_gas_kernel_h || ydim0 != ydim0_ideal_gas_kernel_h || xdim1 != xdim1_ideal_gas_kernel_h || ydim1 != ydim1_ideal_gas_kernel_h || xdim2 != xdim2_ideal_gas_kernel_h || ydim2 != ydim2_ideal_gas_kernel_h || xdim3 != xdim3_ideal_gas_kernel_h || ydim3 != ydim3_ideal_gas_kernel_h) { hipMemcpyToSymbol( xdim0_ideal_gas_kernel, &xdim0, sizeof(int) ); xdim0_ideal_gas_kernel_h = xdim0; hipMemcpyToSymbol( ydim0_ideal_gas_kernel, &ydim0, sizeof(int) ); ydim0_ideal_gas_kernel_h = ydim0; hipMemcpyToSymbol( xdim1_ideal_gas_kernel, &xdim1, sizeof(int) ); xdim1_ideal_gas_kernel_h = xdim1; hipMemcpyToSymbol( ydim1_ideal_gas_kernel, &ydim1, sizeof(int) ); ydim1_ideal_gas_kernel_h = ydim1; hipMemcpyToSymbol( xdim2_ideal_gas_kernel, &xdim2, sizeof(int) ); xdim2_ideal_gas_kernel_h = xdim2; hipMemcpyToSymbol( ydim2_ideal_gas_kernel, &ydim2, sizeof(int) ); ydim2_ideal_gas_kernel_h = ydim2; hipMemcpyToSymbol( xdim3_ideal_gas_kernel, &xdim3, sizeof(int) ); xdim3_ideal_gas_kernel_h = xdim3; hipMemcpyToSymbol( ydim3_ideal_gas_kernel, &ydim3, sizeof(int) ); ydim3_ideal_gas_kernel_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[10].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_ideal_gas_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[10].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[10].mpi_time += t2-t1; OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 10; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 10; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = 
ops_par_loop_ideal_gas_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(10,"ideal_gas_kernel"); } ops_enqueue_kernel(desc); } #endif
037454c053bd3d7dff451717c440c324175132b7.cu
// // auto-generated by ops.py // __constant__ int xdim0_ideal_gas_kernel; int xdim0_ideal_gas_kernel_h = -1; __constant__ int ydim0_ideal_gas_kernel; int ydim0_ideal_gas_kernel_h = -1; __constant__ int xdim1_ideal_gas_kernel; int xdim1_ideal_gas_kernel_h = -1; __constant__ int ydim1_ideal_gas_kernel; int ydim1_ideal_gas_kernel_h = -1; __constant__ int xdim2_ideal_gas_kernel; int xdim2_ideal_gas_kernel_h = -1; __constant__ int ydim2_ideal_gas_kernel; int ydim2_ideal_gas_kernel_h = -1; __constant__ int xdim3_ideal_gas_kernel; int xdim3_ideal_gas_kernel_h = -1; __constant__ int ydim3_ideal_gas_kernel; int ydim3_ideal_gas_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x,y,z) (x+xdim0_ideal_gas_kernel*(y)+xdim0_ideal_gas_kernel*ydim0_ideal_gas_kernel*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_ideal_gas_kernel*(y)+xdim1_ideal_gas_kernel*ydim1_ideal_gas_kernel*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_ideal_gas_kernel*(y)+xdim2_ideal_gas_kernel*ydim2_ideal_gas_kernel*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_ideal_gas_kernel*(y)+xdim3_ideal_gas_kernel*ydim3_ideal_gas_kernel*(z)) //user function __device__ void ideal_gas_kernel_gpu( const double *density, const double *energy, double *pressure, double *soundspeed) { double sound_speed_squared, v, pressurebyenergy, pressurebyvolume; v = 1.0 / density[OPS_ACC0(0,0,0)]; pressure[OPS_ACC2(0,0,0)] = (1.4 - 1.0) * density[OPS_ACC0(0,0,0)] * energy[OPS_ACC1(0,0,0)]; pressurebyenergy = (1.4 - 1.0) * density[OPS_ACC0(0,0,0)]; pressurebyvolume = -1.0*density[OPS_ACC0(0,0,0)] * pressure[OPS_ACC2(0,0,0)]; sound_speed_squared = v*v*(pressure[OPS_ACC2(0,0,0)] * pressurebyenergy-pressurebyvolume); soundspeed[OPS_ACC3(0,0,0)] = sqrt(sound_speed_squared); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_ideal_gas_kernel( const double* __restrict arg0, const double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_ideal_gas_kernel + idx_z * 1*1 * xdim0_ideal_gas_kernel * ydim0_ideal_gas_kernel; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_ideal_gas_kernel + idx_z * 1*1 * xdim1_ideal_gas_kernel * ydim1_ideal_gas_kernel; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_ideal_gas_kernel + idx_z * 1*1 * xdim2_ideal_gas_kernel * ydim2_ideal_gas_kernel; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_ideal_gas_kernel + idx_z * 1*1 * xdim3_ideal_gas_kernel * ydim3_ideal_gas_kernel; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ideal_gas_kernel_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_ideal_gas_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,10)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(10,"ideal_gas_kernel"); OPS_kernels[10].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int 
end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_ideal_gas_kernel_h || ydim0 != ydim0_ideal_gas_kernel_h || xdim1 != xdim1_ideal_gas_kernel_h || ydim1 != ydim1_ideal_gas_kernel_h || xdim2 != xdim2_ideal_gas_kernel_h || ydim2 != ydim2_ideal_gas_kernel_h || xdim3 != xdim3_ideal_gas_kernel_h || ydim3 != ydim3_ideal_gas_kernel_h) { cudaMemcpyToSymbol( xdim0_ideal_gas_kernel, &xdim0, sizeof(int) ); xdim0_ideal_gas_kernel_h = xdim0; cudaMemcpyToSymbol( ydim0_ideal_gas_kernel, &ydim0, sizeof(int) ); ydim0_ideal_gas_kernel_h = ydim0; cudaMemcpyToSymbol( xdim1_ideal_gas_kernel, &xdim1, sizeof(int) ); xdim1_ideal_gas_kernel_h = xdim1; cudaMemcpyToSymbol( ydim1_ideal_gas_kernel, &ydim1, sizeof(int) ); ydim1_ideal_gas_kernel_h = ydim1; cudaMemcpyToSymbol( xdim2_ideal_gas_kernel, &xdim2, sizeof(int) ); xdim2_ideal_gas_kernel_h = xdim2; cudaMemcpyToSymbol( ydim2_ideal_gas_kernel, &ydim2, sizeof(int) ); ydim2_ideal_gas_kernel_h = ydim2; cudaMemcpyToSymbol( xdim3_ideal_gas_kernel, &xdim3, sizeof(int) ); xdim3_ideal_gas_kernel_h = xdim3; cudaMemcpyToSymbol( ydim3_ideal_gas_kernel, &ydim3, sizeof(int) ); ydim3_ideal_gas_kernel_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[10].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_ideal_gas_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[10].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[10].mpi_time += t2-t1; OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 10; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 10; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_ideal_gas_kernel_execute; 
if (OPS_diags > 1) { ops_timing_realloc(10,"ideal_gas_kernel"); } ops_enqueue_kernel(desc); } #endif
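
A minimal sketch of the constant-memory caching pattern used by the ideal_gas_kernel host stub above: each per-kernel extent lives in __constant__ memory, and a host-side shadow variable (the *_h globals) skips the cudaMemcpyToSymbol upload when the extent has not changed since the last launch. The kernel and variable names here (demo_kernel, xdim0_demo) are hypothetical and not part of the OPS-generated API.

#include <cuda_runtime.h>

// Hypothetical names for illustration; not part of the OPS-generated code.
__constant__ int xdim0_demo;   // device-side extent read by the kernel
static int xdim0_demo_h = -1;  // host-side shadow; -1 means "not uploaded yet"

__global__ void demo_kernel(double *a, int size0) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < size0) a[i] += xdim0_demo;
}

void launch_demo(double *d_a, int size0, int xdim0) {
  if (xdim0 != xdim0_demo_h) {  // upload only when the extent actually changed
    cudaMemcpyToSymbol(xdim0_demo, &xdim0, sizeof(int));
    xdim0_demo_h = xdim0;
  }
  demo_kernel<<<(size0 + 255) / 256, 256>>>(d_a, size0);
}

int main() {
  double *d_a = nullptr;
  cudaMalloc(&d_a, 256 * sizeof(double));
  launch_demo(d_a, 256, 10);
  launch_demo(d_a, 256, 10);  // second call skips the cudaMemcpyToSymbol
  cudaDeviceSynchronize();
  cudaFree(d_a);
  return 0;
}
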
a0c07afc49af66d303c228943339907fd6d44143.hip
// !!! This is a file automatically generated by hipify!!! // ======================================================================== // // Copyright 2018-2019 Ingo Wald // // // // Licensed under the Apache License, Version 2.0 (the "License"); // // you may not use this file except in compliance with the License. // // You may obtain a copy of the License at // // // // http://www.apache.org/licenses/LICENSE-2.0 // // // // Unless required by applicable law or agreed to in writing, software // // distributed under the License is distributed on an "AS IS" BASIS, // // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // // limitations under the License. // // ======================================================================== // #include <optix_device.h> #include <hip/hip_runtime.h> #include "LaunchParams.h" #include "gdt/random/random.h" #include <hip/hip_runtime.h> #include "Utils.h" using namespace osc; namespace osc { typedef gdt::LCG<16> Random; /*! launch parameters in constant memory, filled in by optix upon optixLaunch (this gets filled in from the buffer we pass to optixLaunch) */ extern "C" __constant__ LaunchParams optixLaunchParams; /*! per-ray data now captures random number generator, so programs can access RNG state */ struct CausticPRD { Random random; vec3f power; unsigned int depth; float currentIor; bool hitSpecular; }; extern "C" __global__ void __closesthit__caustic() { CausticPRD& prd = *getPRD<CausticPRD>(); const TriangleMeshSBTData& sbtData = *(const TriangleMeshSBTData*)optixGetSbtDataPointer(); const int ix = optixGetLaunchIndex().x; const int iy = optixGetLaunchIndex().y; // ------------------------------------------------------------------ // gather some basic hit information // ------------------------------------------------------------------ const int primID = optixGetPrimitiveIndex(); const vec3i index = sbtData.index[primID]; // ------------------------------------------------------------------ // compute normal, using either shading normal (if avail), or // geometry normal (fallback) // ------------------------------------------------------------------ const vec3f& A = sbtData.vertex[index.x]; const vec3f& B = sbtData.vertex[index.y]; const vec3f& C = sbtData.vertex[index.z]; vec3f Ng = cross(B - A, C - A); // ------------------------------------------------------------------ // compute ray intersection point // ------------------------------------------------------------------ const vec3f rayOrig = optixGetWorldRayOrigin(); const vec3f rayDir = optixGetWorldRayDirection(); const float rayT = optixGetRayTmax(); vec3f hitPoint = rayOrig + rayDir * rayT; // ------------------------------------------------------------------ // face-forward and normalize normals // ------------------------------------------------------------------ if (dot(rayDir, Ng) > 0.f) Ng = -Ng; Ng = normalize(Ng); int rand_index = prd.random() * NUM_PHOTON_SAMPLES; float coin = optixLaunchParams.halton[rand_index].x; // diffuse component is color for now float Pd = max(sbtData.color * prd.power) / max(prd.power); float Ps = max(sbtData.specular * prd.power) / max(prd.power); float Pt = max(sbtData.transmission * prd.power) / max(prd.power); prd.depth += 1; coin = prd.random(); if (coin <= Pd) { //diffuse PhotonPrint pp; if (prd.hitSpecular) { pp.position = hitPoint; pp.power = prd.power; optixLaunchParams.preCausticMap[ix + iy * NC] = pp; printf("chau\n"); } } else if (coin <= Pd + 
Ps) { // specular if (prd.depth <= optixLaunchParams.maxDepth) { uint32_t u0, u1; packPointer(&prd, u0, u1); // obtain reflection direction vec3f reflectDir = reflect(rayDir, Ng); //rayo en la direccion de reflexion desde punto; prd.power = (prd.power * sbtData.specular) / Ps; prd.hitSpecular = true; printf("hola\n"); optixTrace( optixLaunchParams.traversable, hitPoint, reflectDir, 1.e-4f, // tmin 1e20f, // tmax 0.0f, // rayTime OptixVisibilityMask(255), OPTIX_RAY_FLAG_DISABLE_ANYHIT, // OPTIX_RAY_FLAG_NONE, CAUSTIC_RAY_TYPE, // SBT offset RAY_TYPE_COUNT, // SBT stride CAUSTIC_RAY_TYPE, // missSBTIndex u0, u1 ); } } else if (coin <= Pd + Ps + Pt) { // transmission if (prd.depth <= optixLaunchParams.maxDepth) { uint32_t u0, u1; packPointer(&prd, u0, u1); printf("hola2\n"); float nit; float cosi = dot(rayDir, Ng); if (cosi < 0) { // ray comes from outside surface nit = prd.currentIor / sbtData.ior; cosi = -cosi; prd.currentIor = sbtData.ior; } else { // ray comes from inside surface nit = sbtData.ior; } float testrit = nit * nit * (1 - cosi * cosi); if (testrit <= 1) { // obtain refraction direction vec3f refractionDir = nit * rayDir + (nit * cosi - sqrtf(1 - testrit)) * Ng; prd.power = (prd.power * sbtData.transmission) / Pt; prd.hitSpecular = true; optixTrace( optixLaunchParams.traversable, hitPoint, refractionDir, 1e-4f, // tmin 1e20f, // tmax 0.0f, // rayTime OptixVisibilityMask(255), OPTIX_RAY_FLAG_DISABLE_ANYHIT, // OPTIX_RAY_FLAG_NONE, CAUSTIC_RAY_TYPE, // SBT offset RAY_TYPE_COUNT, // SBT stride CAUSTIC_RAY_TYPE, // missSBTIndex u0, u1 ); } } } else { // absorption check if need to store diffuse photons PhotonPrint pp; if (prd.hitSpecular) { pp.position = hitPoint; pp.power = prd.power; optixLaunchParams.preCausticMap[ix + iy * NC] = pp; printf("chau\n"); } } } extern "C" __global__ void __anyhit__caustic() { } extern "C" __global__ void __miss__caustic() { printf("miss\n"); } extern "C" __global__ void __raygen__renderCaustic() { const int ix = optixGetLaunchIndex().x; const int iy = optixGetLaunchIndex().y; CausticPRD prd; prd.random.init(ix, ix * optixLaunchParams.frame.size.x); prd.depth = 0; prd.power = optixLaunchParams.light.photonPower; prd.currentIor = 1; uint32_t u0, u1; packPointer(&prd, u0, u1); //Ncell = 10000 % cantidad de fotones lanzados por celda if (optixLaunchParams.projectionMap[ix + NC * iy] == 1) { // hit a specular float xmin = -1 + 2 * (ix - 1) / NC; float xmax = xmin + 2 / NC; float ymin = -1 + 2 * (iy - 1) / NC; float ymax = ymin + 2 / NC; float xmin2 = xmin * xmin; float xmax2 = xmax * xmax; float ymin2 = ymin * ymin; float ymax2 = ymax * ymax; printf("entra\n"); if ((xmin2 + ymin2) < 1 || (xmin2 + ymax2) < 1 || (xmax2 + ymin2) < 1 || (xmax2 + ymax2) < 1) { //% si alguna de las 4 esquinas de la celda est dentro del crculo de radio 1 for (int k = 0; k < NUM_CAUSTIC_PER_CELL; k++) { float x = xmin + 2 * prd.random() / NC; float y = ymin + 2 * prd.random() / NC; int z2 = 1 - x * x - y * y; z2 = z2 > 0.0f ? sqrtf(z2) : 0.0f; vec3f direction = optixLaunchParams.light.normal * vec3f(x, y, z2); prd.power = optixLaunchParams.light.photonPower; prd.depth = 0; prd.currentIor = 1; optixTrace( optixLaunchParams.traversable, optixLaunchParams.light.origin, direction, 1e-4f, // tmin 1e20f, // tmax 0.0f, // rayTime OptixVisibilityMask(255), OPTIX_RAY_FLAG_DISABLE_ANYHIT, // OPTIX_RAY_FLAG_NONE, CAUSTIC_RAY_TYPE, // SBT offset RAY_TYPE_COUNT, // SBT stride CAUSTIC_RAY_TYPE, // missSBTIndex //prd.depth // reinterpret_cast<unsigned int&>(prd.depth) u0, u1 ); } } } } }
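
A minimal, host-callable sketch of the cell-to-direction mapping performed by __raygen__renderCaustic above, assuming NC is the per-axis resolution of the projection map over [-1, 1]^2; the helper name sample_cell and the Dir struct are hypothetical. Note that the original kernel stores 1 - x*x - y*y in an int named z2, which truncates the value before the square root; the sketch keeps it in a float. The kernel then combines the sampled (x, y, z) with the light normal to obtain the trace direction.

#include <cmath>

struct Dir { float x, y, z; };  // hypothetical POD; the kernel uses vec3f

// u and v are uniform random numbers in [0, 1), as produced by prd.random().
inline Dir sample_cell(int ix, int iy, float NC, float u, float v) {
  float xmin = -1.0f + 2.0f * (ix - 1) / NC;   // lower corner of cell (ix, iy)
  float ymin = -1.0f + 2.0f * (iy - 1) / NC;
  float x = xmin + 2.0f * u / NC;              // jittered point inside the cell
  float y = ymin + 2.0f * v / NC;
  float z2 = 1.0f - x * x - y * y;             // kept as float here (the kernel uses int)
  float z = z2 > 0.0f ? std::sqrt(z2) : 0.0f;  // lift onto the upper unit hemisphere
  return Dir{x, y, z};
}
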
a0c07afc49af66d303c228943339907fd6d44143.cu
// ======================================================================== // // Copyright 2018-2019 Ingo Wald // // // // Licensed under the Apache License, Version 2.0 (the "License"); // // you may not use this file except in compliance with the License. // // You may obtain a copy of the License at // // // // http://www.apache.org/licenses/LICENSE-2.0 // // // // Unless required by applicable law or agreed to in writing, software // // distributed under the License is distributed on an "AS IS" BASIS, // // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // // limitations under the License. // // ======================================================================== // #include <optix_device.h> #include <cuda_runtime.h> #include "LaunchParams.h" #include "gdt/random/random.h" #include <cuda.h> #include "Utils.h" using namespace osc; namespace osc { typedef gdt::LCG<16> Random; /*! launch parameters in constant memory, filled in by optix upon optixLaunch (this gets filled in from the buffer we pass to optixLaunch) */ extern "C" __constant__ LaunchParams optixLaunchParams; /*! per-ray data now captures random number generator, so programs can access RNG state */ struct CausticPRD { Random random; vec3f power; unsigned int depth; float currentIor; bool hitSpecular; }; extern "C" __global__ void __closesthit__caustic() { CausticPRD& prd = *getPRD<CausticPRD>(); const TriangleMeshSBTData& sbtData = *(const TriangleMeshSBTData*)optixGetSbtDataPointer(); const int ix = optixGetLaunchIndex().x; const int iy = optixGetLaunchIndex().y; // ------------------------------------------------------------------ // gather some basic hit information // ------------------------------------------------------------------ const int primID = optixGetPrimitiveIndex(); const vec3i index = sbtData.index[primID]; // ------------------------------------------------------------------ // compute normal, using either shading normal (if avail), or // geometry normal (fallback) // ------------------------------------------------------------------ const vec3f& A = sbtData.vertex[index.x]; const vec3f& B = sbtData.vertex[index.y]; const vec3f& C = sbtData.vertex[index.z]; vec3f Ng = cross(B - A, C - A); // ------------------------------------------------------------------ // compute ray intersection point // ------------------------------------------------------------------ const vec3f rayOrig = optixGetWorldRayOrigin(); const vec3f rayDir = optixGetWorldRayDirection(); const float rayT = optixGetRayTmax(); vec3f hitPoint = rayOrig + rayDir * rayT; // ------------------------------------------------------------------ // face-forward and normalize normals // ------------------------------------------------------------------ if (dot(rayDir, Ng) > 0.f) Ng = -Ng; Ng = normalize(Ng); int rand_index = prd.random() * NUM_PHOTON_SAMPLES; float coin = optixLaunchParams.halton[rand_index].x; // diffuse component is color for now float Pd = max(sbtData.color * prd.power) / max(prd.power); float Ps = max(sbtData.specular * prd.power) / max(prd.power); float Pt = max(sbtData.transmission * prd.power) / max(prd.power); prd.depth += 1; coin = prd.random(); if (coin <= Pd) { //diffuse PhotonPrint pp; if (prd.hitSpecular) { pp.position = hitPoint; pp.power = prd.power; optixLaunchParams.preCausticMap[ix + iy * NC] = pp; printf("chau\n"); } } else if (coin <= Pd + Ps) { // specular if (prd.depth <= optixLaunchParams.maxDepth) { uint32_t 
u0, u1; packPointer(&prd, u0, u1); // obtain reflection direction vec3f reflectDir = reflect(rayDir, Ng); //rayo en la direccion de reflexion desde punto; prd.power = (prd.power * sbtData.specular) / Ps; prd.hitSpecular = true; printf("hola\n"); optixTrace( optixLaunchParams.traversable, hitPoint, reflectDir, 1.e-4f, // tmin 1e20f, // tmax 0.0f, // rayTime OptixVisibilityMask(255), OPTIX_RAY_FLAG_DISABLE_ANYHIT, // OPTIX_RAY_FLAG_NONE, CAUSTIC_RAY_TYPE, // SBT offset RAY_TYPE_COUNT, // SBT stride CAUSTIC_RAY_TYPE, // missSBTIndex u0, u1 ); } } else if (coin <= Pd + Ps + Pt) { // transmission if (prd.depth <= optixLaunchParams.maxDepth) { uint32_t u0, u1; packPointer(&prd, u0, u1); printf("hola2\n"); float nit; float cosi = dot(rayDir, Ng); if (cosi < 0) { // ray comes from outside surface nit = prd.currentIor / sbtData.ior; cosi = -cosi; prd.currentIor = sbtData.ior; } else { // ray comes from inside surface nit = sbtData.ior; } float testrit = nit * nit * (1 - cosi * cosi); if (testrit <= 1) { // obtain refraction direction vec3f refractionDir = nit * rayDir + (nit * cosi - sqrtf(1 - testrit)) * Ng; prd.power = (prd.power * sbtData.transmission) / Pt; prd.hitSpecular = true; optixTrace( optixLaunchParams.traversable, hitPoint, refractionDir, 1e-4f, // tmin 1e20f, // tmax 0.0f, // rayTime OptixVisibilityMask(255), OPTIX_RAY_FLAG_DISABLE_ANYHIT, // OPTIX_RAY_FLAG_NONE, CAUSTIC_RAY_TYPE, // SBT offset RAY_TYPE_COUNT, // SBT stride CAUSTIC_RAY_TYPE, // missSBTIndex u0, u1 ); } } } else { // absorption check if need to store diffuse photons PhotonPrint pp; if (prd.hitSpecular) { pp.position = hitPoint; pp.power = prd.power; optixLaunchParams.preCausticMap[ix + iy * NC] = pp; printf("chau\n"); } } } extern "C" __global__ void __anyhit__caustic() { } extern "C" __global__ void __miss__caustic() { printf("miss\n"); } extern "C" __global__ void __raygen__renderCaustic() { const int ix = optixGetLaunchIndex().x; const int iy = optixGetLaunchIndex().y; CausticPRD prd; prd.random.init(ix, ix * optixLaunchParams.frame.size.x); prd.depth = 0; prd.power = optixLaunchParams.light.photonPower; prd.currentIor = 1; uint32_t u0, u1; packPointer(&prd, u0, u1); //Ncell = 10000 % cantidad de fotones lanzados por celda if (optixLaunchParams.projectionMap[ix + NC * iy] == 1) { // hit a specular float xmin = -1 + 2 * (ix - 1) / NC; float xmax = xmin + 2 / NC; float ymin = -1 + 2 * (iy - 1) / NC; float ymax = ymin + 2 / NC; float xmin2 = xmin * xmin; float xmax2 = xmax * xmax; float ymin2 = ymin * ymin; float ymax2 = ymax * ymax; printf("entra\n"); if ((xmin2 + ymin2) < 1 || (xmin2 + ymax2) < 1 || (xmax2 + ymin2) < 1 || (xmax2 + ymax2) < 1) { //% si alguna de las 4 esquinas de la celda está dentro del círculo de radio 1 for (int k = 0; k < NUM_CAUSTIC_PER_CELL; k++) { float x = xmin + 2 * prd.random() / NC; float y = ymin + 2 * prd.random() / NC; int z2 = 1 - x * x - y * y; z2 = z2 > 0.0f ? sqrtf(z2) : 0.0f; vec3f direction = optixLaunchParams.light.normal * vec3f(x, y, z2); prd.power = optixLaunchParams.light.photonPower; prd.depth = 0; prd.currentIor = 1; optixTrace( optixLaunchParams.traversable, optixLaunchParams.light.origin, direction, 1e-4f, // tmin 1e20f, // tmax 0.0f, // rayTime OptixVisibilityMask(255), OPTIX_RAY_FLAG_DISABLE_ANYHIT, // OPTIX_RAY_FLAG_NONE, CAUSTIC_RAY_TYPE, // SBT offset RAY_TYPE_COUNT, // SBT stride CAUSTIC_RAY_TYPE, // missSBTIndex //prd.depth // reinterpret_cast<unsigned int&>(prd.depth) u0, u1 ); } } } } }
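
A minimal sketch of the Russian-roulette event selection in __closesthit__caustic above. In the kernel, Pd, Ps and Pt are the ratios max(albedo * power) / max(power) for the diffuse, specular and transmissive albedos, and coin is a uniform random number from prd.random(); the enum and function below are hypothetical helpers, not part of the program.

// Hypothetical standalone helper mirroring the branch structure of the closest-hit program.
enum PhotonEvent { DIFFUSE_STORE, SPECULAR_REFLECT, TRANSMIT, ABSORB };

inline PhotonEvent choose_event(float Pd, float Ps, float Pt, float coin) {
  if (coin <= Pd)           return DIFFUSE_STORE;    // store a caustic photon if a specular bounce happened earlier
  if (coin <= Pd + Ps)      return SPECULAR_REFLECT; // power *= specular / Ps, trace the reflected ray
  if (coin <= Pd + Ps + Pt) return TRANSMIT;         // power *= transmission / Pt, trace the refracted ray
  return ABSORB;                                     // absorbed; the kernel may still store the photon here
}

The transmission branch additionally checks that nit^2 * (1 - cos^2(theta)) <= 1 before tracing, i.e. it skips refraction under total internal reflection.
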
17ddfb7afe441646f5abfec56c43742994cebdb0.hip
// !!! This is a file automatically generated by hipify!!!
#include <complex>
#include <cusp/copy.cuh>
#include <cusp/cusp.cuh>
#include <gtest/gtest.h>

template <typename T> void run_test(int N) {
  std::vector<std::complex<float>> host_input_data(N);
  for (int i = 0; i < N; i++) {
    host_input_data[i] = std::complex<float>(i, -i);
  }
  std::vector<std::complex<float>> host_output_data(N);

  void *dev_input_data;
  void *dev_output_data;

  hipMalloc(&dev_input_data, N * sizeof(std::complex<float>));
  hipMalloc(&dev_output_data, N * sizeof(std::complex<float>));

  hipMemcpy(dev_input_data, host_input_data.data(),
            N * sizeof(std::complex<float>), hipMemcpyHostToDevice);

  int ncopies = N * sizeof(std::complex<float>) / sizeof(T);

  cusp::copy<T> op;
  int minGrid, blockSize, gridSize;
  op.occupancy(&blockSize, &minGrid);
  gridSize = (ncopies + blockSize - 1) / blockSize;
  op.set_block_and_grid(blockSize, gridSize);
  op.launch({dev_input_data}, {dev_output_data}, ncopies);

  hipDeviceSynchronize();
  hipMemcpy(host_output_data.data(), dev_output_data,
            N * sizeof(std::complex<float>), hipMemcpyDeviceToHost);

  EXPECT_EQ(host_input_data, host_output_data);
}

TEST(CopyKernel, Basic) {
  int N = 1024 * 100;
  N = 32;
  run_test<uint64_t>(N);
  run_test<uint8_t>(N);
}
17ddfb7afe441646f5abfec56c43742994cebdb0.cu
#include <complex>
#include <cusp/copy.cuh>
#include <cusp/cusp.cuh>
#include <gtest/gtest.h>

template <typename T> void run_test(int N) {
  std::vector<std::complex<float>> host_input_data(N);
  for (int i = 0; i < N; i++) {
    host_input_data[i] = std::complex<float>(i, -i);
  }
  std::vector<std::complex<float>> host_output_data(N);

  void *dev_input_data;
  void *dev_output_data;

  cudaMalloc(&dev_input_data, N * sizeof(std::complex<float>));
  cudaMalloc(&dev_output_data, N * sizeof(std::complex<float>));

  cudaMemcpy(dev_input_data, host_input_data.data(),
             N * sizeof(std::complex<float>), cudaMemcpyHostToDevice);

  int ncopies = N * sizeof(std::complex<float>) / sizeof(T);

  cusp::copy<T> op;
  int minGrid, blockSize, gridSize;
  op.occupancy(&blockSize, &minGrid);
  gridSize = (ncopies + blockSize - 1) / blockSize;
  op.set_block_and_grid(blockSize, gridSize);
  op.launch({dev_input_data}, {dev_output_data}, ncopies);

  cudaDeviceSynchronize();
  cudaMemcpy(host_output_data.data(), dev_output_data,
             N * sizeof(std::complex<float>), cudaMemcpyDeviceToHost);

  EXPECT_EQ(host_input_data, host_output_data);
}

TEST(CopyKernel, Basic) {
  int N = 1024 * 100;
  N = 32;
  run_test<uint64_t>(N);
  run_test<uint8_t>(N);
}
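
For this pair the hipify translation is purely mechanical: cudaMalloc, cudaMemcpy, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost and cudaDeviceSynchronize become hipMalloc, hipMemcpy, hipMemcpyHostToDevice, hipMemcpyDeviceToHost and hipDeviceSynchronize, plus the generated header comment. Below is a minimal, self-contained HIP sketch of the same allocate/copy round trip; it is an illustration only (the error check and the plain float payload are additions, not part of the test above).

#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>

int main() {
  const int N = 32;
  std::vector<float> host_in(N, 1.0f), host_out(N, 0.0f);

  float *dev = nullptr;
  if (hipMalloc(reinterpret_cast<void **>(&dev), N * sizeof(float)) != hipSuccess) {
    std::printf("hipMalloc failed\n");
    return 1;
  }
  hipMemcpy(dev, host_in.data(), N * sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(host_out.data(), dev, N * sizeof(float), hipMemcpyDeviceToHost);
  hipFree(dev);

  std::printf("round trip %s\n", host_in == host_out ? "ok" : "mismatch");
  return 0;
}
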
7ad39987a0cb8185ddfc3a252356689907924f23.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/utility.hpp" #include "opencv2/core/cuda/functional.hpp" #include "opencv2/core/cuda/limits.hpp" #include "opencv2/core/cuda/vec_math.hpp" #include "opencv2/core/cuda/reduce.hpp" #include "opencv2/core/cuda/filters.hpp" #include "opencv2/core/cuda/border_interpolate.hpp" #include <iostream> using namespace cv::cuda; using namespace cv::cuda::device; namespace pyrlk { __constant__ int c_winSize_x; __constant__ int c_winSize_y; __constant__ int c_halfWin_x; __constant__ int c_halfWin_y; __constant__ int c_iters; texture<uchar, hipTextureType2D, hipReadModeNormalizedFloat> tex_I8U(false, hipFilterModeLinear, hipAddressModeClamp); texture<uchar4, hipTextureType2D, hipReadModeNormalizedFloat> tex_I8UC4(false, hipFilterModeLinear, hipAddressModeClamp); texture<ushort4, hipTextureType2D, hipReadModeNormalizedFloat> tex_I16UC4(false, hipFilterModeLinear, hipAddressModeClamp); texture<float, hipTextureType2D, hipReadModeElementType> tex_If(false, hipFilterModeLinear, hipAddressModeClamp); texture<float4, hipTextureType2D, hipReadModeElementType> tex_If4(false, hipFilterModeLinear, hipAddressModeClamp); texture<uchar, hipTextureType2D, hipReadModeElementType> tex_Ib(false, hipFilterModePoint, hipAddressModeClamp); texture<uchar, hipTextureType2D, hipReadModeNormalizedFloat> tex_J8U(false, hipFilterModeLinear, hipAddressModeClamp); texture<uchar4, hipTextureType2D, hipReadModeNormalizedFloat> tex_J8UC4(false, hipFilterModeLinear, hipAddressModeClamp); texture<ushort4, hipTextureType2D, hipReadModeNormalizedFloat> tex_J16UC4(false, hipFilterModeLinear, hipAddressModeClamp); texture<float, hipTextureType2D, hipReadModeElementType> tex_Jf(false, hipFilterModeLinear, hipAddressModeClamp); texture<float4, hipTextureType2D, hipReadModeElementType> tex_Jf4(false, hipFilterModeLinear, hipAddressModeClamp); template <int cn, typename T> struct Tex_I { static __host__ __forceinline__ void bindTexture_(PtrStepSz<typename TypeVec<T, cn>::vec_type> I) { CV_UNUSED(I); } }; template <> struct Tex_I<1, uchar> { static __device__ __forceinline__ float read(float x, float y) { return tex2D(tex_I8U, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<uchar>& I) { bindTexture(&tex_I8U, I); } }; template <> struct Tex_I<1, ushort> { static __device__ __forceinline__ float read(float x, float y) { return 0.0; } static __host__ __forceinline__ void bindTexture_(PtrStepSz<ushort>& I) { CV_UNUSED(I); } }; template <> struct Tex_I<1, int> { static __device__ __forceinline__ float read(float x, float y) { return 0.0; } static __host__ __forceinline__ void bindTexture_(PtrStepSz<int>& I) { CV_UNUSED(I); } }; template <> struct Tex_I<1, float> { static __device__ __forceinline__ float read(float x, float y) { return tex2D(tex_If, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<float>& I) { bindTexture(&tex_If, I); } }; // ****************** 3 channel specializations ************************ template <> struct Tex_I<3, uchar> { static __device__ __forceinline__ float3 read(float x, float y) { return make_float3(0,0,0); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<uchar3> I) { CV_UNUSED(I); } }; template <> struct Tex_I<3, ushort> { static __device__ __forceinline__ float3 read(float x, float y) { return make_float3(0, 0, 0); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<ushort3> I) { CV_UNUSED(I); } }; template <> struct Tex_I<3, int> { static __device__ 
__forceinline__ float3 read(float x, float y) { return make_float3(0, 0, 0); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<int3> I) { CV_UNUSED(I); } }; template <> struct Tex_I<3, float> { static __device__ __forceinline__ float3 read(float x, float y) { return make_float3(0, 0, 0); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<float3> I) { CV_UNUSED(I); } }; // ****************** 4 channel specializations ************************ template <> struct Tex_I<4, uchar> { static __device__ __forceinline__ float4 read(float x, float y) { return tex2D(tex_I8UC4, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<uchar4>& I) { bindTexture(&tex_I8UC4, I); } }; template <> struct Tex_I<4, ushort> { static __device__ __forceinline__ float4 read(float x, float y) { return tex2D(tex_I16UC4, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<ushort4>& I) { bindTexture(&tex_I16UC4, I); } }; template <> struct Tex_I<4, float> { static __device__ __forceinline__ float4 read(float x, float y) { return tex2D(tex_If4, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<float4>& I) { bindTexture(&tex_If4, I); } }; // ************* J *************** template <int cn, typename T> struct Tex_J { static __host__ __forceinline__ void bindTexture_(PtrStepSz<typename TypeVec<T,cn>::vec_type>& J) { CV_UNUSED(J); } }; template <> struct Tex_J<1, uchar> { static __device__ __forceinline__ float read(float x, float y) { return tex2D(tex_J8U, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<uchar>& J) { bindTexture(&tex_J8U, J); } }; template <> struct Tex_J<1, float> { static __device__ __forceinline__ float read(float x, float y) { return tex2D(tex_Jf, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<float>& J) { bindTexture(&tex_Jf, J); } }; // ************* 4 channel specializations *************** template <> struct Tex_J<4, uchar> { static __device__ __forceinline__ float4 read(float x, float y) { return tex2D(tex_J8UC4, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<uchar4>& J) { bindTexture(&tex_J8UC4, J); } }; template <> struct Tex_J<4, ushort> { static __device__ __forceinline__ float4 read(float x, float y) { return tex2D(tex_J16UC4, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<ushort4>& J) { bindTexture(&tex_J16UC4, J); } }; template <> struct Tex_J<4, float> { static __device__ __forceinline__ float4 read(float x, float y) { return tex2D(tex_Jf4, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<float4>& J) { bindTexture(&tex_Jf4, J); } }; __device__ __forceinline__ void accum(float& dst, const float& val) { dst += val; } __device__ __forceinline__ void accum(float& dst, const float2& val) { dst += val.x + val.y; } __device__ __forceinline__ void accum(float& dst, const float3& val) { dst += val.x + val.y + val.z; } __device__ __forceinline__ void accum(float& dst, const float4& val) { dst += val.x + val.y + val.z + val.w; } __device__ __forceinline__ float abs_(float a) { return ::fabsf(a); } __device__ __forceinline__ float4 abs_(const float4& a) { return abs(a); } __device__ __forceinline__ float2 abs_(const float2& a) { return abs(a); } __device__ __forceinline__ float3 abs_(const float3& a) { return abs(a); } template<typename T> __device__ __forceinline__ typename TypeVec<float, 1>::vec_type ToFloat(const typename TypeVec<T, 1>::vec_type& other) { return other; } template<typename T> __device__ __forceinline__ 
typename TypeVec<float, 2>::vec_type ToFloat(const typename TypeVec<T, 2>::vec_type& other) { typename TypeVec<float, 2>::vec_type ret; ret.x = other.x; ret.y = other.y; return ret; } template<typename T> __device__ __forceinline__ typename TypeVec<float, 3>::vec_type ToFloat(const typename TypeVec<T, 3>::vec_type& other) { typename TypeVec<float, 3>::vec_type ret; ret.x = other.x; ret.y = other.y; ret.z = other.z; return ret; } template<typename T> __device__ __forceinline__ typename TypeVec<float, 4>::vec_type ToFloat(const typename TypeVec<T, 4>::vec_type& other) { typename TypeVec<float, 4>::vec_type ret; ret.x = other.x; ret.y = other.y; ret.z = other.z; ret.w = other.w; return ret; } template <typename T> struct DenormalizationFactor { static __device__ __forceinline__ float factor() { return 1.0f; } }; template <> struct DenormalizationFactor<uchar> { static __device__ __forceinline__ float factor() { return 255.0f; } }; template <int cn, int PATCH_X, int PATCH_Y, bool calcErr, typename T> __global__ void sparseKernel(const float2* prevPts, float2* nextPts, uchar* status, float* err, const int level, const int rows, const int cols) { #if __CUDA_ARCH__ <= 110 const int BLOCK_SIZE = 128; #else const int BLOCK_SIZE = 256; #endif __shared__ float smem1[BLOCK_SIZE]; __shared__ float smem2[BLOCK_SIZE]; __shared__ float smem3[BLOCK_SIZE]; const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; float2 prevPt = prevPts[blockIdx.x]; prevPt.x *= (1.0f / (1 << level)); prevPt.y *= (1.0f / (1 << level)); if (prevPt.x < 0 || prevPt.x >= cols || prevPt.y < 0 || prevPt.y >= rows) { if (tid == 0 && level == 0) status[blockIdx.x] = 0; return; } prevPt.x -= c_halfWin_x; prevPt.y -= c_halfWin_y; // extract the patch from the first image, compute covariation matrix of derivatives float A11 = 0; float A12 = 0; float A22 = 0; typedef typename TypeVec<float, cn>::vec_type work_type; work_type I_patch [PATCH_Y][PATCH_X]; work_type dIdx_patch[PATCH_Y][PATCH_X]; work_type dIdy_patch[PATCH_Y][PATCH_X]; for (int yBase = threadIdx.y, i = 0; yBase < c_winSize_y; yBase += blockDim.y, ++i) { for (int xBase = threadIdx.x, j = 0; xBase < c_winSize_x; xBase += blockDim.x, ++j) { float x = prevPt.x + xBase + 0.5f; float y = prevPt.y + yBase + 0.5f; I_patch[i][j] = Tex_I<cn, T>::read(x, y); // Scharr Deriv work_type dIdx = 3.0f * Tex_I<cn,T>::read(x+1, y-1) + 10.0f * Tex_I<cn, T>::read(x+1, y) + 3.0f * Tex_I<cn,T>::read(x+1, y+1) - (3.0f * Tex_I<cn,T>::read(x-1, y-1) + 10.0f * Tex_I<cn, T>::read(x-1, y) + 3.0f * Tex_I<cn,T>::read(x-1, y+1)); work_type dIdy = 3.0f * Tex_I<cn,T>::read(x-1, y+1) + 10.0f * Tex_I<cn, T>::read(x, y+1) + 3.0f * Tex_I<cn,T>::read(x+1, y+1) - (3.0f * Tex_I<cn,T>::read(x-1, y-1) + 10.0f * Tex_I<cn, T>::read(x, y-1) + 3.0f * Tex_I<cn,T>::read(x+1, y-1)); dIdx_patch[i][j] = dIdx; dIdy_patch[i][j] = dIdy; accum(A11, dIdx * dIdx); accum(A12, dIdx * dIdy); accum(A22, dIdy * dIdy); } } reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2, smem3), thrust::tie(A11, A12, A22), tid, thrust::make_tuple(plus<float>(), plus<float>(), plus<float>())); #if __CUDA_ARCH__ >= 300 if (tid == 0) { smem1[0] = A11; smem2[0] = A12; smem3[0] = A22; } #endif __syncthreads(); A11 = smem1[0]; A12 = smem2[0]; A22 = smem3[0]; float D = A11 * A22 - A12 * A12; if (D < numeric_limits<float>::epsilon()) { if (tid == 0 && level == 0) status[blockIdx.x] = 0; return; } D = 1.f / D; A11 *= D; A12 *= D; A22 *= D; float2 nextPt = nextPts[blockIdx.x]; nextPt.x *= 2.f; nextPt.y *= 2.f; nextPt.x -= c_halfWin_x; nextPt.y -= c_halfWin_y; 
for (int k = 0; k < c_iters; ++k) { if (nextPt.x < -c_halfWin_x || nextPt.x >= cols || nextPt.y < -c_halfWin_y || nextPt.y >= rows) { if (tid == 0 && level == 0) status[blockIdx.x] = 0; return; } float b1 = 0; float b2 = 0; for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i) { for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j) { work_type I_val = I_patch[i][j]; work_type J_val = Tex_J<cn, T>::read(nextPt.x + x + 0.5f, nextPt.y + y + 0.5f); work_type diff = (J_val - I_val) * 32.0f; accum(b1, diff * dIdx_patch[i][j]); accum(b2, diff * dIdy_patch[i][j]); } } reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2), thrust::tie(b1, b2), tid, thrust::make_tuple(plus<float>(), plus<float>())); #if __CUDA_ARCH__ >= 300 if (tid == 0) { smem1[0] = b1; smem2[0] = b2; } #endif __syncthreads(); b1 = smem1[0]; b2 = smem2[0]; float2 delta; delta.x = A12 * b2 - A22 * b1; delta.y = A12 * b1 - A11 * b2; nextPt.x += delta.x; nextPt.y += delta.y; if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f) break; } float errval = 0; if (calcErr) { for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i) { for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j) { work_type I_val = I_patch[i][j]; work_type J_val = Tex_J<cn, T>::read(nextPt.x + x + 0.5f, nextPt.y + y + 0.5f); work_type diff = J_val - I_val; accum(errval, abs_(diff)); } } reduce<BLOCK_SIZE>(smem1, errval, tid, plus<float>()); } if (tid == 0) { nextPt.x += c_halfWin_x; nextPt.y += c_halfWin_y; nextPts[blockIdx.x] = nextPt; if (calcErr) err[blockIdx.x] = static_cast<float>(errval) / (::min(cn, 3) * c_winSize_x * c_winSize_y) * DenormalizationFactor<T>::factor(); } } // Kernel, uses non texture fetches template <int PATCH_X, int PATCH_Y, bool calcErr, int cn, typename T, typename Ptr2D> __global__ void sparseKernel_(Ptr2D I, Ptr2D J, const float2* prevPts, float2* nextPts, uchar* status, float* err, const int level, const int rows, const int cols) { #if __CUDA_ARCH__ <= 110 const int BLOCK_SIZE = 128; #else const int BLOCK_SIZE = 256; #endif __shared__ float smem1[BLOCK_SIZE]; __shared__ float smem2[BLOCK_SIZE]; __shared__ float smem3[BLOCK_SIZE]; const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; float2 prevPt = prevPts[blockIdx.x]; prevPt.x *= (1.0f / (1 << level)); prevPt.y *= (1.0f / (1 << level)); if (prevPt.x < 0 || prevPt.x >= cols || prevPt.y < 0 || prevPt.y >= rows) { if (tid == 0 && level == 0) status[blockIdx.x] = 0; return; } prevPt.x -= c_halfWin_x; prevPt.y -= c_halfWin_y; // extract the patch from the first image, compute covariation matrix of derivatives float A11 = 0; float A12 = 0; float A22 = 0; typedef typename TypeVec<float, cn>::vec_type work_type; work_type I_patch[PATCH_Y][PATCH_X]; work_type dIdx_patch[PATCH_Y][PATCH_X]; work_type dIdy_patch[PATCH_Y][PATCH_X]; for (int yBase = threadIdx.y, i = 0; yBase < c_winSize_y; yBase += blockDim.y, ++i) { for (int xBase = threadIdx.x, j = 0; xBase < c_winSize_x; xBase += blockDim.x, ++j) { float x = prevPt.x + xBase + 0.5f; float y = prevPt.y + yBase + 0.5f; I_patch[i][j] = ToFloat<T>(I(y, x)); // Scharr Deriv work_type dIdx = 3.0f * I(y - 1, x + 1) + 10.0f * I(y, x + 1) + 3.0f * I(y + 1, x + 1) - (3.0f * I(y - 1, x - 1) + 10.0f * I(y, x - 1) + 3.0f * I(y + 1 , x - 1)); work_type dIdy = 3.0f * I(y + 1, x - 1) + 10.0f * I(y + 1, x) + 3.0f * I(y+1, x + 1) - (3.0f * I(y - 1, x - 1) + 10.0f * I(y-1, x) + 3.0f * I(y - 1, x + 1)); dIdx_patch[i][j] = dIdx; dIdy_patch[i][j] = dIdy; accum(A11, dIdx * dIdx); accum(A12, dIdx * 
dIdy); accum(A22, dIdy * dIdy); } } reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2, smem3), thrust::tie(A11, A12, A22), tid, thrust::make_tuple(plus<float>(), plus<float>(), plus<float>())); #if __CUDA_ARCH__ >= 300 if (tid == 0) { smem1[0] = A11; smem2[0] = A12; smem3[0] = A22; } #endif __syncthreads(); A11 = smem1[0]; A12 = smem2[0]; A22 = smem3[0]; float D = A11 * A22 - A12 * A12; if (D < numeric_limits<float>::epsilon()) { if (tid == 0 && level == 0) status[blockIdx.x] = 0; return; } D = 1.f / D; A11 *= D; A12 *= D; A22 *= D; float2 nextPt = nextPts[blockIdx.x]; nextPt.x *= 2.f; nextPt.y *= 2.f; nextPt.x -= c_halfWin_x; nextPt.y -= c_halfWin_y; for (int k = 0; k < c_iters; ++k) { if (nextPt.x < -c_halfWin_x || nextPt.x >= cols || nextPt.y < -c_halfWin_y || nextPt.y >= rows) { if (tid == 0 && level == 0) status[blockIdx.x] = 0; return; } float b1 = 0; float b2 = 0; for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i) { for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j) { work_type I_val = I_patch[i][j]; work_type J_val = ToFloat<T>(J(nextPt.y + y + 0.5f, nextPt.x + x + 0.5f)); work_type diff = (J_val - I_val) * 32.0f; accum(b1, diff * dIdx_patch[i][j]); accum(b2, diff * dIdy_patch[i][j]); } } reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2), thrust::tie(b1, b2), tid, thrust::make_tuple(plus<float>(), plus<float>())); #if __CUDA_ARCH__ >= 300 if (tid == 0) { smem1[0] = b1; smem2[0] = b2; } #endif __syncthreads(); b1 = smem1[0]; b2 = smem2[0]; float2 delta; delta.x = A12 * b2 - A22 * b1; delta.y = A12 * b1 - A11 * b2; nextPt.x += delta.x; nextPt.y += delta.y; if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f) break; } float errval = 0; if (calcErr) { for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i) { for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j) { work_type I_val = I_patch[i][j]; work_type J_val = ToFloat<T>(J(nextPt.y + y + 0.5f, nextPt.x + x + 0.5f)); work_type diff = J_val - I_val; accum(errval, abs_(diff)); } } reduce<BLOCK_SIZE>(smem1, errval, tid, plus<float>()); } if (tid == 0) { nextPt.x += c_halfWin_x; nextPt.y += c_halfWin_y; nextPts[blockIdx.x] = nextPt; if (calcErr) err[blockIdx.x] = static_cast<float>(errval) / (::min(cn, 3)*c_winSize_x * c_winSize_y); } } // __global__ void sparseKernel_ template <int cn, int PATCH_X, int PATCH_Y, typename T> class sparse_caller { public: static void call(PtrStepSz<typename TypeVec<T, cn>::vec_type> I, PtrStepSz<typename TypeVec<T, cn>::vec_type> J, int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, int level, dim3 block, hipStream_t stream) { dim3 grid(ptcount); CV_UNUSED(I); CV_UNUSED(J); if (level == 0 && err) hipLaunchKernelGGL(( sparseKernel<cn, PATCH_X, PATCH_Y, true, T>) , dim3(grid), dim3(block), 0, stream , prevPts, nextPts, status, err, level, rows, cols); else hipLaunchKernelGGL(( sparseKernel<cn, PATCH_X, PATCH_Y, false, T>) , dim3(grid), dim3(block), 0, stream , prevPts, nextPts, status, err, level, rows, cols); cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } }; // Specialization to use non texture path because for some reason the texture path keeps failing accuracy tests template<int PATCH_X, int PATCH_Y> class sparse_caller<1, PATCH_X, PATCH_Y, unsigned short> { public: typedef typename TypeVec<unsigned short, 1>::vec_type work_type; typedef PtrStepSz<work_type> Ptr2D; typedef BrdConstant<work_type> BrdType; typedef BorderReader<Ptr2D, BrdType> Reader; 
typedef LinearFilter<Reader> Filter; static void call(Ptr2D I, Ptr2D J, int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, int level, dim3 block, hipStream_t stream) { dim3 grid(ptcount); if (level == 0 && err) { hipLaunchKernelGGL(( sparseKernel_<PATCH_X, PATCH_Y, true, 1, unsigned short>) , dim3(grid), dim3(block), 0, stream , Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } else { hipLaunchKernelGGL(( sparseKernel_<PATCH_X, PATCH_Y, false, 1, unsigned short>) , dim3(grid), dim3(block), 0, stream , Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } }; // Specialization for int because the texture path keeps failing template<int PATCH_X, int PATCH_Y> class sparse_caller<1, PATCH_X, PATCH_Y, int> { public: typedef typename TypeVec<int, 1>::vec_type work_type; typedef PtrStepSz<work_type> Ptr2D; typedef BrdConstant<work_type> BrdType; typedef BorderReader<Ptr2D, BrdType> Reader; typedef LinearFilter<Reader> Filter; static void call(Ptr2D I, Ptr2D J, int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, int level, dim3 block, hipStream_t stream) { dim3 grid(ptcount); if (level == 0 && err) { hipLaunchKernelGGL(( sparseKernel_<PATCH_X, PATCH_Y, true, 1, int>) , dim3(grid), dim3(block), 0, stream , Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } else { hipLaunchKernelGGL(( sparseKernel_<PATCH_X, PATCH_Y, false, 1, int>) , dim3(grid), dim3(block), 0, stream , Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } }; template<int PATCH_X, int PATCH_Y> class sparse_caller<4, PATCH_X, PATCH_Y, int> { public: typedef typename TypeVec<int, 4>::vec_type work_type; typedef PtrStepSz<work_type> Ptr2D; typedef BrdConstant<work_type> BrdType; typedef BorderReader<Ptr2D, BrdType> Reader; typedef LinearFilter<Reader> Filter; static void call(Ptr2D I, Ptr2D J, int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, int level, dim3 block, hipStream_t stream) { dim3 grid(ptcount); if (level == 0 && err) { hipLaunchKernelGGL(( sparseKernel_<PATCH_X, PATCH_Y, true, 4, int>) , dim3(grid), dim3(block), 0, stream , Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } else { hipLaunchKernelGGL(( sparseKernel_<PATCH_X, PATCH_Y, false, 4, int>) , dim3(grid), dim3(block), 0, stream , Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } }; using namespace cv::cuda::device; template <int PATCH_X, int PATCH_Y, typename T> class sparse_caller<3, PATCH_X, PATCH_Y, T> { public: typedef typename TypeVec<T, 3>::vec_type work_type; typedef PtrStepSz<work_type> Ptr2D; typedef BrdConstant<work_type> BrdType; typedef BorderReader<Ptr2D, BrdType> Reader; typedef LinearFilter<Reader> Filter; static void call(Ptr2D I, Ptr2D J, int rows, int cols, const 
float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, int level, dim3 block, hipStream_t stream) { dim3 grid(ptcount); if (level == 0 && err) { hipLaunchKernelGGL(( sparseKernel_<PATCH_X, PATCH_Y, true, 3, T>) , dim3(grid), dim3(block), 0, stream , Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } else { hipLaunchKernelGGL(( sparseKernel_<PATCH_X, PATCH_Y, false, 3, T>) , dim3(grid), dim3(block), 0, stream , Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } }; template <bool calcErr> __global__ void denseKernel(PtrStepf u, PtrStepf v, const PtrStepf prevU, const PtrStepf prevV, PtrStepf err, const int rows, const int cols) { extern __shared__ int smem[]; const int patchWidth = blockDim.x + 2 * c_halfWin_x; const int patchHeight = blockDim.y + 2 * c_halfWin_y; int* I_patch = smem; int* dIdx_patch = I_patch + patchWidth * patchHeight; int* dIdy_patch = dIdx_patch + patchWidth * patchHeight; const int xBase = blockIdx.x * blockDim.x; const int yBase = blockIdx.y * blockDim.y; for (int i = threadIdx.y; i < patchHeight; i += blockDim.y) { for (int j = threadIdx.x; j < patchWidth; j += blockDim.x) { float x = xBase - c_halfWin_x + j + 0.5f; float y = yBase - c_halfWin_y + i + 0.5f; I_patch[i * patchWidth + j] = tex2D(tex_If, x, y); // Scharr Deriv dIdx_patch[i * patchWidth + j] = 3 * tex2D(tex_If, x+1, y-1) + 10 * tex2D(tex_If, x+1, y) + 3 * tex2D(tex_If, x+1, y+1) - (3 * tex2D(tex_If, x-1, y-1) + 10 * tex2D(tex_If, x-1, y) + 3 * tex2D(tex_If, x-1, y+1)); dIdy_patch[i * patchWidth + j] = 3 * tex2D(tex_If, x-1, y+1) + 10 * tex2D(tex_If, x, y+1) + 3 * tex2D(tex_If, x+1, y+1) - (3 * tex2D(tex_If, x-1, y-1) + 10 * tex2D(tex_If, x, y-1) + 3 * tex2D(tex_If, x+1, y-1)); } } __syncthreads(); const int x = xBase + threadIdx.x; const int y = yBase + threadIdx.y; if (x >= cols || y >= rows) return; int A11i = 0; int A12i = 0; int A22i = 0; for (int i = 0; i < c_winSize_y; ++i) { for (int j = 0; j < c_winSize_x; ++j) { int dIdx = dIdx_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)]; int dIdy = dIdy_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)]; A11i += dIdx * dIdx; A12i += dIdx * dIdy; A22i += dIdy * dIdy; } } float A11 = A11i; float A12 = A12i; float A22 = A22i; float D = A11 * A22 - A12 * A12; if (D < numeric_limits<float>::epsilon()) { if (calcErr) err(y, x) = numeric_limits<float>::max(); return; } D = 1.f / D; A11 *= D; A12 *= D; A22 *= D; float2 nextPt; nextPt.x = x + prevU(y/2, x/2) * 2.0f; nextPt.y = y + prevV(y/2, x/2) * 2.0f; for (int k = 0; k < c_iters; ++k) { if (nextPt.x < 0 || nextPt.x >= cols || nextPt.y < 0 || nextPt.y >= rows) { if (calcErr) err(y, x) = numeric_limits<float>::max(); return; } int b1 = 0; int b2 = 0; for (int i = 0; i < c_winSize_y; ++i) { for (int j = 0; j < c_winSize_x; ++j) { int I = I_patch[(threadIdx.y + i) * patchWidth + threadIdx.x + j]; int J = tex2D(tex_Jf, nextPt.x - c_halfWin_x + j + 0.5f, nextPt.y - c_halfWin_y + i + 0.5f); int diff = (J - I) * 32; int dIdx = dIdx_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)]; int dIdy = dIdy_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)]; b1 += diff * dIdx; b2 += diff * dIdy; } } float2 delta; delta.x = A12 * b2 - A22 * b1; delta.y = A12 * b1 - A11 * b2; nextPt.x += delta.x; nextPt.y += delta.y; if 
(::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f) break; } u(y, x) = nextPt.x - x; v(y, x) = nextPt.y - y; if (calcErr) { int errval = 0; for (int i = 0; i < c_winSize_y; ++i) { for (int j = 0; j < c_winSize_x; ++j) { int I = I_patch[(threadIdx.y + i) * patchWidth + threadIdx.x + j]; int J = tex2D(tex_Jf, nextPt.x - c_halfWin_x + j + 0.5f, nextPt.y - c_halfWin_y + i + 0.5f); errval += ::abs(J - I); } } err(y, x) = static_cast<float>(errval) / (c_winSize_x * c_winSize_y); } } void loadWinSize(int* winSize, int* halfWinSize, hipStream_t stream) { cudaSafeCall( hipMemcpyToSymbolAsync(c_winSize_x, winSize, sizeof(int), 0, hipMemcpyHostToDevice, stream) ); cudaSafeCall( hipMemcpyToSymbolAsync(c_winSize_y, winSize + 1, sizeof(int), 0, hipMemcpyHostToDevice, stream) ); cudaSafeCall( hipMemcpyToSymbolAsync(c_halfWin_x, halfWinSize, sizeof(int), 0, hipMemcpyHostToDevice, stream) ); cudaSafeCall( hipMemcpyToSymbolAsync(c_halfWin_y, halfWinSize + 1, sizeof(int), 0, hipMemcpyHostToDevice, stream) ); } void loadIters(int* iters, hipStream_t stream) { cudaSafeCall( hipMemcpyToSymbolAsync(c_iters, iters, sizeof(int), 0, hipMemcpyHostToDevice, stream) ); } void loadConstants(int2 winSize_, int iters_, hipStream_t stream) { static int2 winSize = make_int2(0,0); if(winSize.x != winSize_.x || winSize.y != winSize_.y) { winSize = winSize_; cudaSafeCall( hipMemcpyToSymbolAsync(c_winSize_x, &winSize.x, sizeof(int), 0, hipMemcpyHostToDevice, stream) ); cudaSafeCall( hipMemcpyToSymbolAsync(c_winSize_y, &winSize.y, sizeof(int), 0, hipMemcpyHostToDevice, stream) ); } static int2 halfWin = make_int2(0,0); int2 half = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2); if(halfWin.x != half.x || halfWin.y != half.y) { halfWin = half; cudaSafeCall( hipMemcpyToSymbolAsync(c_halfWin_x, &halfWin.x, sizeof(int), 0, hipMemcpyHostToDevice, stream) ); cudaSafeCall( hipMemcpyToSymbolAsync(c_halfWin_y, &halfWin.y, sizeof(int), 0, hipMemcpyHostToDevice, stream) ); } static int iters = 0; if(iters != iters_) { iters = iters_; cudaSafeCall( hipMemcpyToSymbolAsync(c_iters, &iters, sizeof(int), 0, hipMemcpyHostToDevice, stream) ); } } template<typename T, int cn> struct pyrLK_caller { static void sparse(PtrStepSz<typename TypeVec<T, cn>::vec_type> I, PtrStepSz<typename TypeVec<T, cn>::vec_type> J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, int level, dim3 block, dim3 patch, hipStream_t stream) { typedef void(*func_t)(PtrStepSz<typename TypeVec<T, cn>::vec_type> I, PtrStepSz<typename TypeVec<T, cn>::vec_type> J, int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, int level, dim3 block, hipStream_t stream); static const func_t funcs[5][5] = { { sparse_caller<cn, 1, 1,T>::call, sparse_caller<cn, 2, 1,T>::call, sparse_caller<cn, 3, 1,T>::call, sparse_caller<cn, 4, 1,T>::call, sparse_caller<cn, 5, 1,T>::call }, { sparse_caller<cn, 1, 2,T>::call, sparse_caller<cn, 2, 2,T>::call, sparse_caller<cn, 3, 2,T>::call, sparse_caller<cn, 4, 2,T>::call, sparse_caller<cn, 5, 2,T>::call }, { sparse_caller<cn, 1, 3,T>::call, sparse_caller<cn, 2, 3,T>::call, sparse_caller<cn, 3, 3,T>::call, sparse_caller<cn, 4, 3,T>::call, sparse_caller<cn, 5, 3,T>::call }, { sparse_caller<cn, 1, 4,T>::call, sparse_caller<cn, 2, 4,T>::call, sparse_caller<cn, 3, 4,T>::call, sparse_caller<cn, 4, 4,T>::call, sparse_caller<cn, 5, 4,T>::call }, { sparse_caller<cn, 1, 5,T>::call, sparse_caller<cn, 2, 5,T>::call, sparse_caller<cn, 3, 5,T>::call, sparse_caller<cn, 4, 5,T>::call, 
sparse_caller<cn, 5, 5,T>::call } }; Tex_I<cn, T>::bindTexture_(I); Tex_J<cn, T>::bindTexture_(J); funcs[patch.y - 1][patch.x - 1](I, J, I.rows, I.cols, prevPts, nextPts, status, err, ptcount, level, block, stream); } static void dense(PtrStepSz<T> I, PtrStepSz<T> J, PtrStepSzf u, PtrStepSzf v, PtrStepSzf prevU, PtrStepSzf prevV, PtrStepSzf err, int2 winSize, hipStream_t stream) { dim3 block(16, 16); dim3 grid(divUp(I.cols, block.x), divUp(I.rows, block.y)); Tex_I<1, T>::bindTexture_(I); Tex_J<1, T>::bindTexture_(J); int2 halfWin = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2); const int patchWidth = block.x + 2 * halfWin.x; const int patchHeight = block.y + 2 * halfWin.y; size_t smem_size = 3 * patchWidth * patchHeight * sizeof(int); if (err.data) { denseKernel<true> << <grid, block, smem_size, stream >> >(u, v, prevU, prevV, err, I.rows, I.cols); cudaSafeCall(hipGetLastError()); } else { denseKernel<false> << <grid, block, smem_size, stream >> >(u, v, prevU, prevV, PtrStepf(), I.rows, I.cols); cudaSafeCall(hipGetLastError()); } if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } }; template class pyrLK_caller<unsigned char,1>; template class pyrLK_caller<unsigned short,1>; template class pyrLK_caller<int,1>; template class pyrLK_caller<float,1>; template class pyrLK_caller<unsigned char, 3>; template class pyrLK_caller<unsigned short, 3>; template class pyrLK_caller<int, 3>; template class pyrLK_caller<float, 3>; template class pyrLK_caller<unsigned char, 4>; template class pyrLK_caller<unsigned short, 4>; template class pyrLK_caller<int, 4>; template class pyrLK_caller<float, 4>; } #endif /* CUDA_DISABLER */
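
The most visible change hipify made to this file, besides renaming the texture enums (cudaTextureType2D to hipTextureType2D, cudaFilterModeLinear to hipFilterModeLinear, and so on), is rewriting the sparse kernel launches from CUDA's triple-chevron syntax into the hipLaunchKernelGGL macro. A minimal sketch with a hypothetical kernel (scale_kernel is not part of the OpenCV code):

#include <hip/hip_runtime.h>

__global__ void scale_kernel(float *data, float s, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= s;
}

void launch(float *d_data, float s, int n, hipStream_t stream) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  // CUDA form used by the original source:
  //   scale_kernel<<<grid, block, 0, stream>>>(d_data, s, n);
  // HIP form emitted by hipify, with the same grid/block/shared-mem/stream order:
  hipLaunchKernelGGL(scale_kernel, grid, block, 0, stream, d_data, s, n);
}
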
7ad39987a0cb8185ddfc3a252356689907924f23.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/utility.hpp" #include "opencv2/core/cuda/functional.hpp" #include "opencv2/core/cuda/limits.hpp" #include "opencv2/core/cuda/vec_math.hpp" #include "opencv2/core/cuda/reduce.hpp" #include "opencv2/core/cuda/filters.hpp" #include "opencv2/core/cuda/border_interpolate.hpp" #include <iostream> using namespace cv::cuda; using namespace cv::cuda::device; namespace pyrlk { __constant__ int c_winSize_x; __constant__ int c_winSize_y; __constant__ int c_halfWin_x; __constant__ int c_halfWin_y; __constant__ int c_iters; texture<uchar, cudaTextureType2D, cudaReadModeNormalizedFloat> tex_I8U(false, cudaFilterModeLinear, cudaAddressModeClamp); texture<uchar4, cudaTextureType2D, cudaReadModeNormalizedFloat> tex_I8UC4(false, cudaFilterModeLinear, cudaAddressModeClamp); texture<ushort4, cudaTextureType2D, cudaReadModeNormalizedFloat> tex_I16UC4(false, cudaFilterModeLinear, cudaAddressModeClamp); texture<float, cudaTextureType2D, cudaReadModeElementType> tex_If(false, cudaFilterModeLinear, cudaAddressModeClamp); texture<float4, cudaTextureType2D, cudaReadModeElementType> tex_If4(false, cudaFilterModeLinear, cudaAddressModeClamp); texture<uchar, cudaTextureType2D, cudaReadModeElementType> tex_Ib(false, cudaFilterModePoint, cudaAddressModeClamp); texture<uchar, cudaTextureType2D, cudaReadModeNormalizedFloat> tex_J8U(false, cudaFilterModeLinear, cudaAddressModeClamp); texture<uchar4, cudaTextureType2D, cudaReadModeNormalizedFloat> tex_J8UC4(false, cudaFilterModeLinear, cudaAddressModeClamp); texture<ushort4, cudaTextureType2D, cudaReadModeNormalizedFloat> tex_J16UC4(false, cudaFilterModeLinear, cudaAddressModeClamp); texture<float, cudaTextureType2D, cudaReadModeElementType> tex_Jf(false, cudaFilterModeLinear, cudaAddressModeClamp); texture<float4, cudaTextureType2D, cudaReadModeElementType> tex_Jf4(false, cudaFilterModeLinear, cudaAddressModeClamp); template <int cn, typename T> struct Tex_I { static __host__ __forceinline__ void bindTexture_(PtrStepSz<typename TypeVec<T, cn>::vec_type> I) { CV_UNUSED(I); } }; template <> struct Tex_I<1, uchar> { static __device__ __forceinline__ float read(float x, float y) { return tex2D(tex_I8U, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<uchar>& I) { bindTexture(&tex_I8U, I); } }; template <> struct Tex_I<1, ushort> { static __device__ __forceinline__ float read(float x, float y) { return 0.0; } static __host__ __forceinline__ void bindTexture_(PtrStepSz<ushort>& I) { CV_UNUSED(I); } }; template <> struct Tex_I<1, int> { static __device__ __forceinline__ float read(float x, float y) { return 0.0; } static __host__ __forceinline__ void bindTexture_(PtrStepSz<int>& I) { CV_UNUSED(I); } }; template <> struct Tex_I<1, float> { static __device__ __forceinline__ float read(float x, float y) { return tex2D(tex_If, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<float>& I) { bindTexture(&tex_If, I); } }; // ****************** 3 channel specializations ************************ template <> struct Tex_I<3, uchar> { static __device__ __forceinline__ float3 read(float x, float y) { return make_float3(0,0,0); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<uchar3> I) { CV_UNUSED(I); } }; template <> struct Tex_I<3, ushort> { static __device__ __forceinline__ float3 read(float x, float y) { return make_float3(0, 0, 0); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<ushort3> I) { CV_UNUSED(I); } }; template <> 
struct Tex_I<3, int> { static __device__ __forceinline__ float3 read(float x, float y) { return make_float3(0, 0, 0); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<int3> I) { CV_UNUSED(I); } }; template <> struct Tex_I<3, float> { static __device__ __forceinline__ float3 read(float x, float y) { return make_float3(0, 0, 0); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<float3> I) { CV_UNUSED(I); } }; // ****************** 4 channel specializations ************************ template <> struct Tex_I<4, uchar> { static __device__ __forceinline__ float4 read(float x, float y) { return tex2D(tex_I8UC4, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<uchar4>& I) { bindTexture(&tex_I8UC4, I); } }; template <> struct Tex_I<4, ushort> { static __device__ __forceinline__ float4 read(float x, float y) { return tex2D(tex_I16UC4, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<ushort4>& I) { bindTexture(&tex_I16UC4, I); } }; template <> struct Tex_I<4, float> { static __device__ __forceinline__ float4 read(float x, float y) { return tex2D(tex_If4, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<float4>& I) { bindTexture(&tex_If4, I); } }; // ************* J *************** template <int cn, typename T> struct Tex_J { static __host__ __forceinline__ void bindTexture_(PtrStepSz<typename TypeVec<T,cn>::vec_type>& J) { CV_UNUSED(J); } }; template <> struct Tex_J<1, uchar> { static __device__ __forceinline__ float read(float x, float y) { return tex2D(tex_J8U, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<uchar>& J) { bindTexture(&tex_J8U, J); } }; template <> struct Tex_J<1, float> { static __device__ __forceinline__ float read(float x, float y) { return tex2D(tex_Jf, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<float>& J) { bindTexture(&tex_Jf, J); } }; // ************* 4 channel specializations *************** template <> struct Tex_J<4, uchar> { static __device__ __forceinline__ float4 read(float x, float y) { return tex2D(tex_J8UC4, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<uchar4>& J) { bindTexture(&tex_J8UC4, J); } }; template <> struct Tex_J<4, ushort> { static __device__ __forceinline__ float4 read(float x, float y) { return tex2D(tex_J16UC4, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<ushort4>& J) { bindTexture(&tex_J16UC4, J); } }; template <> struct Tex_J<4, float> { static __device__ __forceinline__ float4 read(float x, float y) { return tex2D(tex_Jf4, x, y); } static __host__ __forceinline__ void bindTexture_(PtrStepSz<float4>& J) { bindTexture(&tex_Jf4, J); } }; __device__ __forceinline__ void accum(float& dst, const float& val) { dst += val; } __device__ __forceinline__ void accum(float& dst, const float2& val) { dst += val.x + val.y; } __device__ __forceinline__ void accum(float& dst, const float3& val) { dst += val.x + val.y + val.z; } __device__ __forceinline__ void accum(float& dst, const float4& val) { dst += val.x + val.y + val.z + val.w; } __device__ __forceinline__ float abs_(float a) { return ::fabsf(a); } __device__ __forceinline__ float4 abs_(const float4& a) { return abs(a); } __device__ __forceinline__ float2 abs_(const float2& a) { return abs(a); } __device__ __forceinline__ float3 abs_(const float3& a) { return abs(a); } template<typename T> __device__ __forceinline__ typename TypeVec<float, 1>::vec_type ToFloat(const typename TypeVec<T, 1>::vec_type& other) { return other; } 
template<typename T> __device__ __forceinline__ typename TypeVec<float, 2>::vec_type ToFloat(const typename TypeVec<T, 2>::vec_type& other) { typename TypeVec<float, 2>::vec_type ret; ret.x = other.x; ret.y = other.y; return ret; } template<typename T> __device__ __forceinline__ typename TypeVec<float, 3>::vec_type ToFloat(const typename TypeVec<T, 3>::vec_type& other) { typename TypeVec<float, 3>::vec_type ret; ret.x = other.x; ret.y = other.y; ret.z = other.z; return ret; } template<typename T> __device__ __forceinline__ typename TypeVec<float, 4>::vec_type ToFloat(const typename TypeVec<T, 4>::vec_type& other) { typename TypeVec<float, 4>::vec_type ret; ret.x = other.x; ret.y = other.y; ret.z = other.z; ret.w = other.w; return ret; } template <typename T> struct DenormalizationFactor { static __device__ __forceinline__ float factor() { return 1.0f; } }; template <> struct DenormalizationFactor<uchar> { static __device__ __forceinline__ float factor() { return 255.0f; } }; template <int cn, int PATCH_X, int PATCH_Y, bool calcErr, typename T> __global__ void sparseKernel(const float2* prevPts, float2* nextPts, uchar* status, float* err, const int level, const int rows, const int cols) { #if __CUDA_ARCH__ <= 110 const int BLOCK_SIZE = 128; #else const int BLOCK_SIZE = 256; #endif __shared__ float smem1[BLOCK_SIZE]; __shared__ float smem2[BLOCK_SIZE]; __shared__ float smem3[BLOCK_SIZE]; const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; float2 prevPt = prevPts[blockIdx.x]; prevPt.x *= (1.0f / (1 << level)); prevPt.y *= (1.0f / (1 << level)); if (prevPt.x < 0 || prevPt.x >= cols || prevPt.y < 0 || prevPt.y >= rows) { if (tid == 0 && level == 0) status[blockIdx.x] = 0; return; } prevPt.x -= c_halfWin_x; prevPt.y -= c_halfWin_y; // extract the patch from the first image, compute covariation matrix of derivatives float A11 = 0; float A12 = 0; float A22 = 0; typedef typename TypeVec<float, cn>::vec_type work_type; work_type I_patch [PATCH_Y][PATCH_X]; work_type dIdx_patch[PATCH_Y][PATCH_X]; work_type dIdy_patch[PATCH_Y][PATCH_X]; for (int yBase = threadIdx.y, i = 0; yBase < c_winSize_y; yBase += blockDim.y, ++i) { for (int xBase = threadIdx.x, j = 0; xBase < c_winSize_x; xBase += blockDim.x, ++j) { float x = prevPt.x + xBase + 0.5f; float y = prevPt.y + yBase + 0.5f; I_patch[i][j] = Tex_I<cn, T>::read(x, y); // Scharr Deriv work_type dIdx = 3.0f * Tex_I<cn,T>::read(x+1, y-1) + 10.0f * Tex_I<cn, T>::read(x+1, y) + 3.0f * Tex_I<cn,T>::read(x+1, y+1) - (3.0f * Tex_I<cn,T>::read(x-1, y-1) + 10.0f * Tex_I<cn, T>::read(x-1, y) + 3.0f * Tex_I<cn,T>::read(x-1, y+1)); work_type dIdy = 3.0f * Tex_I<cn,T>::read(x-1, y+1) + 10.0f * Tex_I<cn, T>::read(x, y+1) + 3.0f * Tex_I<cn,T>::read(x+1, y+1) - (3.0f * Tex_I<cn,T>::read(x-1, y-1) + 10.0f * Tex_I<cn, T>::read(x, y-1) + 3.0f * Tex_I<cn,T>::read(x+1, y-1)); dIdx_patch[i][j] = dIdx; dIdy_patch[i][j] = dIdy; accum(A11, dIdx * dIdx); accum(A12, dIdx * dIdy); accum(A22, dIdy * dIdy); } } reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2, smem3), thrust::tie(A11, A12, A22), tid, thrust::make_tuple(plus<float>(), plus<float>(), plus<float>())); #if __CUDA_ARCH__ >= 300 if (tid == 0) { smem1[0] = A11; smem2[0] = A12; smem3[0] = A22; } #endif __syncthreads(); A11 = smem1[0]; A12 = smem2[0]; A22 = smem3[0]; float D = A11 * A22 - A12 * A12; if (D < numeric_limits<float>::epsilon()) { if (tid == 0 && level == 0) status[blockIdx.x] = 0; return; } D = 1.f / D; A11 *= D; A12 *= D; A22 *= D; float2 nextPt = nextPts[blockIdx.x]; nextPt.x *= 2.f; nextPt.y *= 2.f; 
nextPt.x -= c_halfWin_x; nextPt.y -= c_halfWin_y; for (int k = 0; k < c_iters; ++k) { if (nextPt.x < -c_halfWin_x || nextPt.x >= cols || nextPt.y < -c_halfWin_y || nextPt.y >= rows) { if (tid == 0 && level == 0) status[blockIdx.x] = 0; return; } float b1 = 0; float b2 = 0; for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i) { for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j) { work_type I_val = I_patch[i][j]; work_type J_val = Tex_J<cn, T>::read(nextPt.x + x + 0.5f, nextPt.y + y + 0.5f); work_type diff = (J_val - I_val) * 32.0f; accum(b1, diff * dIdx_patch[i][j]); accum(b2, diff * dIdy_patch[i][j]); } } reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2), thrust::tie(b1, b2), tid, thrust::make_tuple(plus<float>(), plus<float>())); #if __CUDA_ARCH__ >= 300 if (tid == 0) { smem1[0] = b1; smem2[0] = b2; } #endif __syncthreads(); b1 = smem1[0]; b2 = smem2[0]; float2 delta; delta.x = A12 * b2 - A22 * b1; delta.y = A12 * b1 - A11 * b2; nextPt.x += delta.x; nextPt.y += delta.y; if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f) break; } float errval = 0; if (calcErr) { for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i) { for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j) { work_type I_val = I_patch[i][j]; work_type J_val = Tex_J<cn, T>::read(nextPt.x + x + 0.5f, nextPt.y + y + 0.5f); work_type diff = J_val - I_val; accum(errval, abs_(diff)); } } reduce<BLOCK_SIZE>(smem1, errval, tid, plus<float>()); } if (tid == 0) { nextPt.x += c_halfWin_x; nextPt.y += c_halfWin_y; nextPts[blockIdx.x] = nextPt; if (calcErr) err[blockIdx.x] = static_cast<float>(errval) / (::min(cn, 3) * c_winSize_x * c_winSize_y) * DenormalizationFactor<T>::factor(); } } // Kernel, uses non texture fetches template <int PATCH_X, int PATCH_Y, bool calcErr, int cn, typename T, typename Ptr2D> __global__ void sparseKernel_(Ptr2D I, Ptr2D J, const float2* prevPts, float2* nextPts, uchar* status, float* err, const int level, const int rows, const int cols) { #if __CUDA_ARCH__ <= 110 const int BLOCK_SIZE = 128; #else const int BLOCK_SIZE = 256; #endif __shared__ float smem1[BLOCK_SIZE]; __shared__ float smem2[BLOCK_SIZE]; __shared__ float smem3[BLOCK_SIZE]; const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; float2 prevPt = prevPts[blockIdx.x]; prevPt.x *= (1.0f / (1 << level)); prevPt.y *= (1.0f / (1 << level)); if (prevPt.x < 0 || prevPt.x >= cols || prevPt.y < 0 || prevPt.y >= rows) { if (tid == 0 && level == 0) status[blockIdx.x] = 0; return; } prevPt.x -= c_halfWin_x; prevPt.y -= c_halfWin_y; // extract the patch from the first image, compute covariation matrix of derivatives float A11 = 0; float A12 = 0; float A22 = 0; typedef typename TypeVec<float, cn>::vec_type work_type; work_type I_patch[PATCH_Y][PATCH_X]; work_type dIdx_patch[PATCH_Y][PATCH_X]; work_type dIdy_patch[PATCH_Y][PATCH_X]; for (int yBase = threadIdx.y, i = 0; yBase < c_winSize_y; yBase += blockDim.y, ++i) { for (int xBase = threadIdx.x, j = 0; xBase < c_winSize_x; xBase += blockDim.x, ++j) { float x = prevPt.x + xBase + 0.5f; float y = prevPt.y + yBase + 0.5f; I_patch[i][j] = ToFloat<T>(I(y, x)); // Scharr Deriv work_type dIdx = 3.0f * I(y - 1, x + 1) + 10.0f * I(y, x + 1) + 3.0f * I(y + 1, x + 1) - (3.0f * I(y - 1, x - 1) + 10.0f * I(y, x - 1) + 3.0f * I(y + 1 , x - 1)); work_type dIdy = 3.0f * I(y + 1, x - 1) + 10.0f * I(y + 1, x) + 3.0f * I(y+1, x + 1) - (3.0f * I(y - 1, x - 1) + 10.0f * I(y-1, x) + 3.0f * I(y - 1, x + 1)); dIdx_patch[i][j] = dIdx; dIdy_patch[i][j] = 
dIdy; accum(A11, dIdx * dIdx); accum(A12, dIdx * dIdy); accum(A22, dIdy * dIdy); } } reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2, smem3), thrust::tie(A11, A12, A22), tid, thrust::make_tuple(plus<float>(), plus<float>(), plus<float>())); #if __CUDA_ARCH__ >= 300 if (tid == 0) { smem1[0] = A11; smem2[0] = A12; smem3[0] = A22; } #endif __syncthreads(); A11 = smem1[0]; A12 = smem2[0]; A22 = smem3[0]; float D = A11 * A22 - A12 * A12; if (D < numeric_limits<float>::epsilon()) { if (tid == 0 && level == 0) status[blockIdx.x] = 0; return; } D = 1.f / D; A11 *= D; A12 *= D; A22 *= D; float2 nextPt = nextPts[blockIdx.x]; nextPt.x *= 2.f; nextPt.y *= 2.f; nextPt.x -= c_halfWin_x; nextPt.y -= c_halfWin_y; for (int k = 0; k < c_iters; ++k) { if (nextPt.x < -c_halfWin_x || nextPt.x >= cols || nextPt.y < -c_halfWin_y || nextPt.y >= rows) { if (tid == 0 && level == 0) status[blockIdx.x] = 0; return; } float b1 = 0; float b2 = 0; for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i) { for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j) { work_type I_val = I_patch[i][j]; work_type J_val = ToFloat<T>(J(nextPt.y + y + 0.5f, nextPt.x + x + 0.5f)); work_type diff = (J_val - I_val) * 32.0f; accum(b1, diff * dIdx_patch[i][j]); accum(b2, diff * dIdy_patch[i][j]); } } reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2), thrust::tie(b1, b2), tid, thrust::make_tuple(plus<float>(), plus<float>())); #if __CUDA_ARCH__ >= 300 if (tid == 0) { smem1[0] = b1; smem2[0] = b2; } #endif __syncthreads(); b1 = smem1[0]; b2 = smem2[0]; float2 delta; delta.x = A12 * b2 - A22 * b1; delta.y = A12 * b1 - A11 * b2; nextPt.x += delta.x; nextPt.y += delta.y; if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f) break; } float errval = 0; if (calcErr) { for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i) { for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j) { work_type I_val = I_patch[i][j]; work_type J_val = ToFloat<T>(J(nextPt.y + y + 0.5f, nextPt.x + x + 0.5f)); work_type diff = J_val - I_val; accum(errval, abs_(diff)); } } reduce<BLOCK_SIZE>(smem1, errval, tid, plus<float>()); } if (tid == 0) { nextPt.x += c_halfWin_x; nextPt.y += c_halfWin_y; nextPts[blockIdx.x] = nextPt; if (calcErr) err[blockIdx.x] = static_cast<float>(errval) / (::min(cn, 3)*c_winSize_x * c_winSize_y); } } // __global__ void sparseKernel_ template <int cn, int PATCH_X, int PATCH_Y, typename T> class sparse_caller { public: static void call(PtrStepSz<typename TypeVec<T, cn>::vec_type> I, PtrStepSz<typename TypeVec<T, cn>::vec_type> J, int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, int level, dim3 block, cudaStream_t stream) { dim3 grid(ptcount); CV_UNUSED(I); CV_UNUSED(J); if (level == 0 && err) sparseKernel<cn, PATCH_X, PATCH_Y, true, T> <<<grid, block, 0, stream >>>(prevPts, nextPts, status, err, level, rows, cols); else sparseKernel<cn, PATCH_X, PATCH_Y, false, T> <<<grid, block, 0, stream >>>(prevPts, nextPts, status, err, level, rows, cols); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } }; // Specialization to use non texture path because for some reason the texture path keeps failing accuracy tests template<int PATCH_X, int PATCH_Y> class sparse_caller<1, PATCH_X, PATCH_Y, unsigned short> { public: typedef typename TypeVec<unsigned short, 1>::vec_type work_type; typedef PtrStepSz<work_type> Ptr2D; typedef BrdConstant<work_type> BrdType; typedef BorderReader<Ptr2D, BrdType> Reader; typedef 
LinearFilter<Reader> Filter; static void call(Ptr2D I, Ptr2D J, int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, int level, dim3 block, cudaStream_t stream) { dim3 grid(ptcount); if (level == 0 && err) { sparseKernel_<PATCH_X, PATCH_Y, true, 1, unsigned short> <<<grid, block, 0, stream >>>( Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } else { sparseKernel_<PATCH_X, PATCH_Y, false, 1, unsigned short> <<<grid, block, 0, stream >>>( Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } }; // Specialization for int because the texture path keeps failing template<int PATCH_X, int PATCH_Y> class sparse_caller<1, PATCH_X, PATCH_Y, int> { public: typedef typename TypeVec<int, 1>::vec_type work_type; typedef PtrStepSz<work_type> Ptr2D; typedef BrdConstant<work_type> BrdType; typedef BorderReader<Ptr2D, BrdType> Reader; typedef LinearFilter<Reader> Filter; static void call(Ptr2D I, Ptr2D J, int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, int level, dim3 block, cudaStream_t stream) { dim3 grid(ptcount); if (level == 0 && err) { sparseKernel_<PATCH_X, PATCH_Y, true, 1, int> <<<grid, block, 0, stream >>>( Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } else { sparseKernel_<PATCH_X, PATCH_Y, false, 1, int> <<<grid, block, 0, stream >>>( Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } }; template<int PATCH_X, int PATCH_Y> class sparse_caller<4, PATCH_X, PATCH_Y, int> { public: typedef typename TypeVec<int, 4>::vec_type work_type; typedef PtrStepSz<work_type> Ptr2D; typedef BrdConstant<work_type> BrdType; typedef BorderReader<Ptr2D, BrdType> Reader; typedef LinearFilter<Reader> Filter; static void call(Ptr2D I, Ptr2D J, int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, int level, dim3 block, cudaStream_t stream) { dim3 grid(ptcount); if (level == 0 && err) { sparseKernel_<PATCH_X, PATCH_Y, true, 4, int> <<<grid, block, 0, stream >>>( Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } else { sparseKernel_<PATCH_X, PATCH_Y, false, 4, int> <<<grid, block, 0, stream >>>( Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } }; using namespace cv::cuda::device; template <int PATCH_X, int PATCH_Y, typename T> class sparse_caller<3, PATCH_X, PATCH_Y, T> { public: typedef typename TypeVec<T, 3>::vec_type work_type; typedef PtrStepSz<work_type> Ptr2D; typedef BrdConstant<work_type> BrdType; typedef BorderReader<Ptr2D, BrdType> Reader; typedef LinearFilter<Reader> Filter; static void call(Ptr2D I, Ptr2D J, int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, int level, dim3 block, cudaStream_t stream) { dim3 grid(ptcount); if (level == 0 && err) { 
sparseKernel_<PATCH_X, PATCH_Y, true, 3, T> <<<grid, block, 0, stream >>>( Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } else { sparseKernel_<PATCH_X, PATCH_Y, false, 3, T> <<<grid, block, 0, stream >>>( Filter(Reader(I, BrdType(rows, cols))), Filter(Reader(J, BrdType(rows, cols))), prevPts, nextPts, status, err, level, rows, cols); } cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } }; template <bool calcErr> __global__ void denseKernel(PtrStepf u, PtrStepf v, const PtrStepf prevU, const PtrStepf prevV, PtrStepf err, const int rows, const int cols) { extern __shared__ int smem[]; const int patchWidth = blockDim.x + 2 * c_halfWin_x; const int patchHeight = blockDim.y + 2 * c_halfWin_y; int* I_patch = smem; int* dIdx_patch = I_patch + patchWidth * patchHeight; int* dIdy_patch = dIdx_patch + patchWidth * patchHeight; const int xBase = blockIdx.x * blockDim.x; const int yBase = blockIdx.y * blockDim.y; for (int i = threadIdx.y; i < patchHeight; i += blockDim.y) { for (int j = threadIdx.x; j < patchWidth; j += blockDim.x) { float x = xBase - c_halfWin_x + j + 0.5f; float y = yBase - c_halfWin_y + i + 0.5f; I_patch[i * patchWidth + j] = tex2D(tex_If, x, y); // Scharr Deriv dIdx_patch[i * patchWidth + j] = 3 * tex2D(tex_If, x+1, y-1) + 10 * tex2D(tex_If, x+1, y) + 3 * tex2D(tex_If, x+1, y+1) - (3 * tex2D(tex_If, x-1, y-1) + 10 * tex2D(tex_If, x-1, y) + 3 * tex2D(tex_If, x-1, y+1)); dIdy_patch[i * patchWidth + j] = 3 * tex2D(tex_If, x-1, y+1) + 10 * tex2D(tex_If, x, y+1) + 3 * tex2D(tex_If, x+1, y+1) - (3 * tex2D(tex_If, x-1, y-1) + 10 * tex2D(tex_If, x, y-1) + 3 * tex2D(tex_If, x+1, y-1)); } } __syncthreads(); const int x = xBase + threadIdx.x; const int y = yBase + threadIdx.y; if (x >= cols || y >= rows) return; int A11i = 0; int A12i = 0; int A22i = 0; for (int i = 0; i < c_winSize_y; ++i) { for (int j = 0; j < c_winSize_x; ++j) { int dIdx = dIdx_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)]; int dIdy = dIdy_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)]; A11i += dIdx * dIdx; A12i += dIdx * dIdy; A22i += dIdy * dIdy; } } float A11 = A11i; float A12 = A12i; float A22 = A22i; float D = A11 * A22 - A12 * A12; if (D < numeric_limits<float>::epsilon()) { if (calcErr) err(y, x) = numeric_limits<float>::max(); return; } D = 1.f / D; A11 *= D; A12 *= D; A22 *= D; float2 nextPt; nextPt.x = x + prevU(y/2, x/2) * 2.0f; nextPt.y = y + prevV(y/2, x/2) * 2.0f; for (int k = 0; k < c_iters; ++k) { if (nextPt.x < 0 || nextPt.x >= cols || nextPt.y < 0 || nextPt.y >= rows) { if (calcErr) err(y, x) = numeric_limits<float>::max(); return; } int b1 = 0; int b2 = 0; for (int i = 0; i < c_winSize_y; ++i) { for (int j = 0; j < c_winSize_x; ++j) { int I = I_patch[(threadIdx.y + i) * patchWidth + threadIdx.x + j]; int J = tex2D(tex_Jf, nextPt.x - c_halfWin_x + j + 0.5f, nextPt.y - c_halfWin_y + i + 0.5f); int diff = (J - I) * 32; int dIdx = dIdx_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)]; int dIdy = dIdy_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)]; b1 += diff * dIdx; b2 += diff * dIdy; } } float2 delta; delta.x = A12 * b2 - A22 * b1; delta.y = A12 * b1 - A11 * b2; nextPt.x += delta.x; nextPt.y += delta.y; if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f) break; } u(y, x) = nextPt.x - x; v(y, x) = nextPt.y - y; if (calcErr) { int errval = 0; for (int i = 0; i < c_winSize_y; ++i) { for (int j = 0; j < c_winSize_x; ++j) { int I = 
I_patch[(threadIdx.y + i) * patchWidth + threadIdx.x + j]; int J = tex2D(tex_Jf, nextPt.x - c_halfWin_x + j + 0.5f, nextPt.y - c_halfWin_y + i + 0.5f); errval += ::abs(J - I); } } err(y, x) = static_cast<float>(errval) / (c_winSize_x * c_winSize_y); } } void loadWinSize(int* winSize, int* halfWinSize, cudaStream_t stream) { cudaSafeCall( cudaMemcpyToSymbolAsync(c_winSize_x, winSize, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); cudaSafeCall( cudaMemcpyToSymbolAsync(c_winSize_y, winSize + 1, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); cudaSafeCall( cudaMemcpyToSymbolAsync(c_halfWin_x, halfWinSize, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); cudaSafeCall( cudaMemcpyToSymbolAsync(c_halfWin_y, halfWinSize + 1, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); } void loadIters(int* iters, cudaStream_t stream) { cudaSafeCall( cudaMemcpyToSymbolAsync(c_iters, iters, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); } void loadConstants(int2 winSize_, int iters_, cudaStream_t stream) { static int2 winSize = make_int2(0,0); if(winSize.x != winSize_.x || winSize.y != winSize_.y) { winSize = winSize_; cudaSafeCall( cudaMemcpyToSymbolAsync(c_winSize_x, &winSize.x, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); cudaSafeCall( cudaMemcpyToSymbolAsync(c_winSize_y, &winSize.y, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); } static int2 halfWin = make_int2(0,0); int2 half = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2); if(halfWin.x != half.x || halfWin.y != half.y) { halfWin = half; cudaSafeCall( cudaMemcpyToSymbolAsync(c_halfWin_x, &halfWin.x, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); cudaSafeCall( cudaMemcpyToSymbolAsync(c_halfWin_y, &halfWin.y, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); } static int iters = 0; if(iters != iters_) { iters = iters_; cudaSafeCall( cudaMemcpyToSymbolAsync(c_iters, &iters, sizeof(int), 0, cudaMemcpyHostToDevice, stream) ); } } template<typename T, int cn> struct pyrLK_caller { static void sparse(PtrStepSz<typename TypeVec<T, cn>::vec_type> I, PtrStepSz<typename TypeVec<T, cn>::vec_type> J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, int level, dim3 block, dim3 patch, cudaStream_t stream) { typedef void(*func_t)(PtrStepSz<typename TypeVec<T, cn>::vec_type> I, PtrStepSz<typename TypeVec<T, cn>::vec_type> J, int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount, int level, dim3 block, cudaStream_t stream); static const func_t funcs[5][5] = { { sparse_caller<cn, 1, 1,T>::call, sparse_caller<cn, 2, 1,T>::call, sparse_caller<cn, 3, 1,T>::call, sparse_caller<cn, 4, 1,T>::call, sparse_caller<cn, 5, 1,T>::call }, { sparse_caller<cn, 1, 2,T>::call, sparse_caller<cn, 2, 2,T>::call, sparse_caller<cn, 3, 2,T>::call, sparse_caller<cn, 4, 2,T>::call, sparse_caller<cn, 5, 2,T>::call }, { sparse_caller<cn, 1, 3,T>::call, sparse_caller<cn, 2, 3,T>::call, sparse_caller<cn, 3, 3,T>::call, sparse_caller<cn, 4, 3,T>::call, sparse_caller<cn, 5, 3,T>::call }, { sparse_caller<cn, 1, 4,T>::call, sparse_caller<cn, 2, 4,T>::call, sparse_caller<cn, 3, 4,T>::call, sparse_caller<cn, 4, 4,T>::call, sparse_caller<cn, 5, 4,T>::call }, { sparse_caller<cn, 1, 5,T>::call, sparse_caller<cn, 2, 5,T>::call, sparse_caller<cn, 3, 5,T>::call, sparse_caller<cn, 4, 5,T>::call, sparse_caller<cn, 5, 5,T>::call } }; Tex_I<cn, T>::bindTexture_(I); Tex_J<cn, T>::bindTexture_(J); funcs[patch.y - 1][patch.x - 1](I, J, I.rows, I.cols, prevPts, nextPts, status, err, ptcount, level, block, 
stream); } static void dense(PtrStepSz<T> I, PtrStepSz<T> J, PtrStepSzf u, PtrStepSzf v, PtrStepSzf prevU, PtrStepSzf prevV, PtrStepSzf err, int2 winSize, cudaStream_t stream) { dim3 block(16, 16); dim3 grid(divUp(I.cols, block.x), divUp(I.rows, block.y)); Tex_I<1, T>::bindTexture_(I); Tex_J<1, T>::bindTexture_(J); int2 halfWin = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2); const int patchWidth = block.x + 2 * halfWin.x; const int patchHeight = block.y + 2 * halfWin.y; size_t smem_size = 3 * patchWidth * patchHeight * sizeof(int); if (err.data) { denseKernel<true> << <grid, block, smem_size, stream >> >(u, v, prevU, prevV, err, I.rows, I.cols); cudaSafeCall(cudaGetLastError()); } else { denseKernel<false> << <grid, block, smem_size, stream >> >(u, v, prevU, prevV, PtrStepf(), I.rows, I.cols); cudaSafeCall(cudaGetLastError()); } if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } }; template class pyrLK_caller<unsigned char,1>; template class pyrLK_caller<unsigned short,1>; template class pyrLK_caller<int,1>; template class pyrLK_caller<float,1>; template class pyrLK_caller<unsigned char, 3>; template class pyrLK_caller<unsigned short, 3>; template class pyrLK_caller<int, 3>; template class pyrLK_caller<float, 3>; template class pyrLK_caller<unsigned char, 4>; template class pyrLK_caller<unsigned short, 4>; template class pyrLK_caller<int, 4>; template class pyrLK_caller<float, 4>; } #endif /* CUDA_DISABLER */
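// Illustrative sketch (not part of OpenCV): the inner update that sparseKernel and
// denseKernel perform on every iteration is the solution of the 2x2 Lucas-Kanade
// system G * delta = -b, with the structure tensor entries already scaled by
// 1/det(G). The names below (LKSums, solveLKStep) are introduced only for this
// illustration and do not exist in OpenCV; the kernels reduce the patch sums first.
#include <cstdio>
#include <limits>

struct LKSums
{
    float A11, A12, A22;   // sums of dIdx*dIdx, dIdx*dIdy, dIdy*dIdy over the window
    float b1, b2;          // sums of diff*dIdx, diff*dIdy, with diff = (J - I) * 32
};

// Returns false when det(G) is below epsilon, mirroring the early-out in the kernels.
static bool solveLKStep(LKSums s, float& dx, float& dy)
{
    float D = s.A11 * s.A22 - s.A12 * s.A12;
    if (D < std::numeric_limits<float>::epsilon())
        return false;

    D = 1.0f / D;
    float A11 = s.A11 * D, A12 = s.A12 * D, A22 = s.A22 * D;

    // Identical expressions to the kernels above.
    dx = A12 * s.b2 - A22 * s.b1;
    dy = A12 * s.b1 - A11 * s.b2;
    return true;
}

int main()
{
    LKSums s = { 40.0f, 4.0f, 36.0f, -8.0f, 6.0f };
    float dx, dy;
    if (solveLKStep(s, dx, dy))
        std::printf("delta = (%f, %f)\n", dx, dy);
    // The kernels add this delta to nextPt and stop once |delta.x| and |delta.y|
    // drop below 0.01 or c_iters iterations have been spent.
    return 0;
}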
4b2e8ab4fbbc5e2a193bc7533b6580ebc6216a83.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file * Implementation of batched 2D FFTs with CUDA * * @author Jim Hardwick * * @note * This code was posted to nVidia's public CUDA forum as thanks to the CUDA * forum membership for their assistance. No copyright was asserted and no * license terms were imposed in the post. The post can be found at * http://forums.nvidia.com/index.php?showtopic=34241 */ /** * @note * Modified and extended to suit the mr_recon project... */ #include <hipfft.h> #include "batchfft.hcu" #include "uint_util.hcu" #define BLOCK_DIM 16 ///< Thread block dimension. (TSS: I think think should be 'half_warp_size' "instead"?) texture<float2, 1> permute_tex; // Choose which "3D" transpose void prepare_xy_transpose( batchfftHandle *plan ); //void prepare_yz_transpose( batchfftHandle *plan ); void prepare_xz_transpose( batchfftHandle *plan ); /** * Fast matrix transpose kernel * * Uses shared memory to coalesce global memory reads and writes, improving performance. * * @param idata Input batch of matrices * @param odata Output buffer for transposed batch of matrices, must be different than idata * @param width Width of each matrix, must be a multiple of BLOCK_DIM * @param height Height of each matrix, must be a multiple of BLOCK_DIM * @param num Matrices in the batch */ __global__ void transpose_xy(float2* idata, float2* odata, int width, int height, int num); __global__ void transpose_xz(float2* idata, float2* odata, int width, int height, int num_y, int num_batch); __global__ void transpose_zx(float2* idata, float2* odata, int width, int height, int num_y, int num_batch); // Permute kernel ("3D transpose") __global__ void FFT_permute_kernel( cuFloatComplex* data_in, cuFloatComplex* data_out, uint4 dim, unsigned int num_shifts ); //////////////////////////////////////////////////////////////////////////////// hipfftResult batchfftPlan2d(batchfftHandle* plan, int nx, int ny, hipfftType type, int batch) { if(type != HIPFFT_C2C) return HIPFFT_INVALID_TYPE; if((nx % BLOCK_DIM) != 0) return HIPFFT_INVALID_SIZE; if((ny % BLOCK_DIM) != 0) return HIPFFT_INVALID_SIZE; // Swap nx and ny so they correspoind to the 2D CUFFT API. plan->nx = ny; plan->ny = nx; plan->nz = 1; plan->type = type; plan->batch = batch; plan->transpose_threads.x = BLOCK_DIM; plan->transpose_threads.y = BLOCK_DIM; plan->transpose_threads.z = 1; plan->transpose_grid.x = plan->nx / BLOCK_DIM; plan->transpose_grid.y = plan->ny / BLOCK_DIM; plan->transpose_grid.z = 1; plan->transpose_back_grid.x = plan->ny / BLOCK_DIM; plan->transpose_back_grid.y = plan->nx / BLOCK_DIM; plan->transpose_back_grid.z = 1; hipfftResult ret = HIPFFT_SUCCESS; hipError_t cudaret = hipSuccess; cudaret = hipMalloc(&(plan->temp), plan->nx * plan->ny * plan->batch * sizeof(float2)); if(cudaret != hipSuccess) return HIPFFT_ALLOC_FAILED; ret = hipfftPlan1d(&(plan->rowplan), plan->nx, plan->type, plan->ny * plan->batch); if(ret != HIPFFT_SUCCESS) { cudaret = hipFree(plan->temp); if(cudaret != hipSuccess){ printf("\n'hipFree' failed: %s. Quitting.", hipGetErrorString(hipGetLastError())); fflush(stdout); exit(1); } plan->temp = NULL; return ret; } ret = hipfftPlan1d(&(plan->colplan), plan->ny, plan->type, plan->nx * plan->batch); if(ret != HIPFFT_SUCCESS) { cudaret = hipFree(plan->temp); if(cudaret != hipSuccess){ printf("\n'hipFree' failed: %s. 
Quitting.", hipGetErrorString(hipGetLastError())); fflush(stdout); } plan->temp = NULL; printf("\n 'hipfftPlan1d' failed!."); fflush(stdout); hipfftResult res2 = hipfftDestroy(plan->rowplan); if(res2 != HIPFFT_SUCCESS) { printf("\n'hipfftDestroy' failed: %s. Quitting.", hipGetErrorString(hipGetLastError())); fflush(stdout); exit(1); } return ret; } return HIPFFT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// hipfftResult batchfftPlan3d(batchfftHandle* plan, int nx, int ny, int nz, hipfftType type, int batch) { if(type != HIPFFT_C2C) return HIPFFT_INVALID_TYPE; if((nx % BLOCK_DIM) != 0){ printf("\nbatchFFT: all dimensions (current: %d) must be a multiple of the BLOCK_DIM (%d)", nx, BLOCK_DIM ); return HIPFFT_INVALID_SIZE; } if((ny % BLOCK_DIM) != 0){ printf("\nbatchFFT: all dimensions (current: %d) must be a multiple of the BLOCK_DIM (%d)", ny, BLOCK_DIM ); return HIPFFT_INVALID_SIZE; } if((nz % BLOCK_DIM) != 0){ printf("\nbatchFFT: all dimensions (current: %d) must be a multiple of the BLOCK_DIM (%d)", nz, BLOCK_DIM ); return HIPFFT_INVALID_SIZE; } // Swap nx, ny, and nz so they correspoind to the 3D CUFFT API. plan->nx = nz; plan->ny = ny; plan->nz = nx; plan->type = type; plan->batch = batch; plan->transpose_threads.x = BLOCK_DIM; plan->transpose_threads.y = BLOCK_DIM; plan->transpose_threads.z = 1; hipfftResult ret = HIPFFT_SUCCESS; hipError_t cudaret = hipSuccess; cudaret = hipMalloc(&(plan->temp), plan->nx * plan->ny * plan->nz * plan->batch * sizeof(float2)); if(cudaret != hipSuccess) return HIPFFT_ALLOC_FAILED; ret = hipfftPlan1d(&(plan->rowplan), plan->nx, plan->type, plan->ny * plan->nz * plan->batch); if(ret != HIPFFT_SUCCESS) { cudaret = hipFree(plan->temp); if(cudaret != hipSuccess){ printf("\n'hipFree' failed: %s. Quitting.", hipGetErrorString(hipGetLastError())); fflush(stdout); exit(1); } plan->temp = NULL; return ret; } ret = hipfftPlan1d(&(plan->colplan), plan->ny, plan->type, plan->nx * plan->nz * plan->batch); if(ret != HIPFFT_SUCCESS) { cudaret = hipFree(plan->temp); if(cudaret != hipSuccess){ printf("\n'hipFree' failed: %s. Quitting.", hipGetErrorString(hipGetLastError())); fflush(stdout); exit(1); } printf("\n 'hipfftPlan1d' failed!."); fflush(stdout); plan->temp = NULL; hipfftResult res2 = hipfftDestroy(plan->rowplan); if(res2 != HIPFFT_SUCCESS) { printf("\n'hipfftDestroy' failed: %s. Quitting.", hipGetErrorString(hipGetLastError())); fflush(stdout); exit(1); } return ret; } ret = hipfftPlan1d(&(plan->layerplan), plan->nz, plan->type, plan->nx * plan->ny * plan->batch); if(ret != HIPFFT_SUCCESS) { cudaret = hipFree(plan->temp); if(cudaret != hipSuccess){ printf("\n'hipFree' failed: %s. Quitting.", hipGetErrorString(hipGetLastError())); fflush(stdout); exit(1); } plan->temp = NULL; hipfftResult res2 = hipfftDestroy(plan->rowplan); if(res2 != HIPFFT_SUCCESS) { printf("\n'hipfftDestroy' failed: %s. Quitting.", hipGetErrorString(hipGetLastError())); fflush(stdout); exit(1); } res2 = hipfftDestroy(plan->colplan); if(res2 != HIPFFT_SUCCESS) { printf("\n'hipfftDestroy' failed: %s. Quitting.", hipGetErrorString(hipGetLastError())); fflush(stdout); exit(1); } return ret; } return HIPFFT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// hipfftResult batchfftDestroy(batchfftHandle* plan) { if(plan->temp != NULL) { hipfftResult res = hipfftDestroy(plan->rowplan); if(res != HIPFFT_SUCCESS) { printf("\n'hipfftDestroy' failed: %s. 
Quitting.", hipGetErrorString(hipGetLastError())); fflush(stdout); exit(1); } res = hipfftDestroy(plan->colplan); if(res != HIPFFT_SUCCESS) { printf("\n'hipfftDestroy' failed: %s. Quitting.", hipGetErrorString(hipGetLastError())); fflush(stdout); exit(1); } if( plan->nz != 1 ){ res = hipfftDestroy(plan->layerplan); if(res != HIPFFT_SUCCESS) { printf("\n'hipfftDestroy' failed: %s. Quitting.", hipGetErrorString(hipGetLastError())); fflush(stdout); exit(1); } } hipError_t cudaret = hipFree(plan->temp); if(cudaret != hipSuccess){ printf("\n'hipFree' failed: %s. Quitting.", hipGetErrorString(hipGetLastError())); fflush(stdout); exit(1); } plan->temp = NULL; } return HIPFFT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// hipfftResult batchfftExecute(batchfftHandle plan, void* idata, void* odata, int sign) { if( plan.nz == 1 ) { // 2D FFT hipfftResult cufftret = HIPFFT_SUCCESS; hipError_t cudaret = hipSuccess; // Transform rows cufftret = hipfftExecC2C(plan.rowplan, (cuFloatComplex*)idata, (cuFloatComplex*)odata, sign); if(cufftret != HIPFFT_SUCCESS) return cufftret; // Transpose hipLaunchKernelGGL(( transpose_xy), dim3(plan.transpose_grid), dim3(plan.transpose_threads) , 0, 0, (float2*)odata, (float2*)plan.temp, plan.nx, plan.ny, plan.batch); cudaret = hipGetLastError(); if(cudaret != hipSuccess) return HIPFFT_EXEC_FAILED; // Transform columns cufftret = hipfftExecC2C(plan.colplan, (cuFloatComplex*)plan.temp, (cuFloatComplex*)plan.temp, sign); if(cufftret != HIPFFT_SUCCESS) return cufftret; // Transpose back hipLaunchKernelGGL(( transpose_xy), dim3(plan.transpose_back_grid), dim3(plan.transpose_threads) , 0, 0, (float2*)plan.temp, (float2*)odata, plan.ny, plan.nx, plan.batch); cudaret = hipGetLastError(); if(cudaret != hipSuccess) return HIPFFT_EXEC_FAILED; } else{ // 3D FFT hipfftResult cufftret = HIPFFT_SUCCESS; hipError_t cudaret = hipSuccess; dim3 blockDim(512,1, 1); dim3 gridDim((int)ceil((double)(plan.nx*plan.ny*plan.nz*plan.batch)/512.0),1,1); const uint4 dims = make_uint4( plan.nx, plan.ny, plan.nz, plan.batch ); const hipChannelFormatDesc desc_float2 = hipCreateChannelDesc<float2>(); // Transform rows cufftret = hipfftExecC2C(plan.rowplan, (cuFloatComplex*)idata, (cuFloatComplex*)odata, sign); if(cufftret != HIPFFT_SUCCESS) return cufftret; // fft_permute_radix4( plan.nx, dims, 0, plan.ny*plan.nz*plan.batch, idata, odata, sign ); /* // Permute once "to the left" hipBindTexture( 0, &permute_tex, (float2*) odata, &desc_float2, prod(dims)*sizeof(float2)); FFT_permute_kernel<<< gridDim, blockDim >>>( (cuFloatComplex*) odata, (cuFloatComplex*) plan.temp, make_uint4(plan.nx, plan.ny, plan.nz, plan.batch), 1 ); hipUnbindTexture( permute_tex ); cudaret = hipGetLastError(); if(cudaret != hipSuccess) return HIPFFT_EXEC_FAILED; */ // Transpose prepare_xy_transpose( &plan ); hipLaunchKernelGGL(( transpose_xy), dim3(plan.transpose_grid), dim3(plan.transpose_threads) , 0, 0, (float2*)odata, (float2*)plan.temp, plan.nx, plan.ny, plan.nz*plan.batch); cudaret = hipGetLastError(); if(cudaret != hipSuccess) return HIPFFT_EXEC_FAILED; // Transform columns cufftret = hipfftExecC2C(plan.colplan, (cuFloatComplex*)plan.temp, (cuFloatComplex*) plan.temp, sign); if(cufftret != HIPFFT_SUCCESS) return cufftret; // Transpose back prepare_xy_transpose( &plan ); hipLaunchKernelGGL(( transpose_xy), dim3(plan.transpose_back_grid), dim3(plan.transpose_threads) , 0, 0, (float2*)plan.temp, (float2*)odata, plan.ny, plan.nx, plan.nz*plan.batch); cudaret = hipGetLastError(); 
if(cudaret != hipSuccess) return HIPFFT_EXEC_FAILED; // fft_permute_radix4( plan.ny, dims, 1, plan.nx*plan.nz*plan.batch, odata, odata, sign ); /* // Permute once "to the left" hipBindTexture( 0, &permute_tex, (float2*) plan.temp, &desc_float2, prod(dims)*sizeof(float2)); FFT_permute_kernel<<< gridDim, blockDim >>>( (cuFloatComplex*) plan.temp, (cuFloatComplex*) odata, make_uint4(plan.ny, plan.nz, plan.batch, plan.nx), 1 ); hipUnbindTexture( permute_tex ); cudaret = hipGetLastError(); if(cudaret != hipSuccess) return HIPFFT_EXEC_FAILED; */ // Transpose prepare_xz_transpose( &plan ); hipLaunchKernelGGL(( transpose_xz), dim3(plan.transpose_grid), dim3(plan.transpose_threads) , 0, 0, (float2*)odata, (float2*)plan.temp, plan.nx, plan.nz, plan.ny, plan.batch); cudaret = hipGetLastError(); if(cudaret != hipSuccess) return HIPFFT_EXEC_FAILED; // Transform layers cufftret = hipfftExecC2C(plan.layerplan, (cuFloatComplex*) plan.temp, (cuFloatComplex*)plan.temp, sign); if(cufftret != HIPFFT_SUCCESS) return cufftret; // Transpose back prepare_xz_transpose( &plan ); hipLaunchKernelGGL(( transpose_zx), dim3(plan.transpose_back_grid), dim3(plan.transpose_threads) , 0, 0, (float2*)plan.temp, (float2*)odata, plan.nz, plan.nx, plan.ny, plan.batch); cudaret = hipGetLastError(); if(cudaret != hipSuccess) return HIPFFT_EXEC_FAILED; // fft_permute_radix2( plan.nz, dims, 2, plan.nx*plan.ny*plan.batch, odata, odata, sign ); /* // Permute one final time to complete the loop hipBindTexture( 0, &permute_tex, (float2*) plan.temp, &desc_float2, prod(dims)*sizeof(float2)); FFT_permute_kernel<<< gridDim, blockDim >>>( (cuFloatComplex*) plan.temp, (cuFloatComplex*) odata, make_uint4(plan.nz, plan.batch, plan.nx, plan.ny), 2 ); hipUnbindTexture( permute_tex ); cudaret = hipGetLastError(); if(cudaret != hipSuccess) return HIPFFT_EXEC_FAILED; */ } return HIPFFT_SUCCESS; } /////////////////////////////////////////////////////////////////////////////// __global__ void transpose_xy(float2* idata, float2* odata, int width, int height, int num) { // To prevent shared memory bank confilcts: // - Load each component into a different array. Since the array size is a // multiple of the number of banks (16), each thread reads x and y from // the same bank. If a single float2 array is used, thread n would read // x and y from banks n and n+1, and thread n+8 would read values from the // same banks - causing a bank conflict. // - Use BLOCK_DIM+1 as the x size of the array. This way each row of the // array starts in a different bank - so reading from shared memory // doesn't cause bank conflicts when writing the transpose out to global // memory. 
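// Worked example of the padding described above, assuming the 16 shared-memory banks
// this code was written for: without padding, tile element (row, col) sits at shared
// index row*16 + col, so its bank is (row*16 + col) % 16 = col, and reading one tile
// column (fixed col, row = 0..15) hits the same bank 16 times. With the
// BLOCK_DIM+1 = 17 row stride the index becomes row*17 + col, the bank becomes
// (row + col) % 16, and the 16 elements of a column fall into 16 distinct banks,
// so the transposed read below is conflict-free.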
__shared__ float blockx[(BLOCK_DIM+1)*BLOCK_DIM]; __shared__ float blocky[(BLOCK_DIM+1)*BLOCK_DIM]; unsigned int xBlock = BLOCK_DIM * blockIdx.x; unsigned int yBlock = BLOCK_DIM * blockIdx.y; unsigned int xIndex = xBlock + threadIdx.x; unsigned int yIndex = yBlock + threadIdx.y; unsigned int size = width * height; for(int n = 0; n < num; ++n){ unsigned int index_in = n * size + width * yIndex + xIndex; unsigned int index_block = threadIdx.y * (BLOCK_DIM+1) + threadIdx.x; float2 in = idata[index_in]; blockx[index_block] = in.x; blocky[index_block] = in.y; unsigned int index_transpose = threadIdx.x * (BLOCK_DIM+1) + threadIdx.y; unsigned int index_out = n * size + height * (xBlock + threadIdx.y) + yBlock + threadIdx.x; __syncthreads(); float2 out = make_float2( blockx[index_transpose], blocky[index_transpose] ); odata[index_out] = out; } } __global__ void transpose_xz(float2* idata, float2* odata, int width, int height, int num_y, int num_batch) { // As above, but the 'y' direction elements are now a slice rather than a line apart __shared__ float blockx[(BLOCK_DIM+1)*BLOCK_DIM]; __shared__ float blocky[(BLOCK_DIM+1)*BLOCK_DIM]; unsigned int xBlock = BLOCK_DIM * blockIdx.x; unsigned int yBlock = BLOCK_DIM * blockIdx.y; unsigned int xIndex = xBlock + threadIdx.x; unsigned int yIndex = yBlock + threadIdx.y; unsigned int size = width * height; unsigned int orig_size = width*num_y; unsigned int volume = size*num_y; for( int n_b = 0; n_b < num_batch; ++n_b ){ for( int n_y = 0; n_y < num_y; ++n_y ){ // unsigned int index_in = n * size + width * yIndex + xIndex; unsigned int index_in = n_b * volume + n_y * width + orig_size * yIndex + xIndex; unsigned int index_block = threadIdx.y * (BLOCK_DIM+1) + threadIdx.x; float2 in = idata[index_in]; blockx[index_block] = in.x; blocky[index_block] = in.y; unsigned int index_transpose = threadIdx.x * (BLOCK_DIM+1) + threadIdx.y; // unsigned int index_out = n * size + height * (xBlock + threadIdx.y) + yBlock + threadIdx.x; unsigned int index_out = n_b * volume + n_y * size + height * (xBlock + threadIdx.y) + yBlock + threadIdx.x; __syncthreads(); float2 out = make_float2( blockx[index_transpose], blocky[index_transpose] ); odata[index_out] = out; } } } __global__ void transpose_zx(float2* idata, float2* odata, int width, int height, int num_y, int num_batch) { // As above, but the 'y' direction elements are now a slice rather than a line apart __shared__ float blockx[(BLOCK_DIM+1)*BLOCK_DIM]; __shared__ float blocky[(BLOCK_DIM+1)*BLOCK_DIM]; unsigned int xBlock = BLOCK_DIM * blockIdx.x; unsigned int yBlock = BLOCK_DIM * blockIdx.y; unsigned int xIndex = xBlock + threadIdx.x; unsigned int yIndex = yBlock + threadIdx.y; unsigned int size = width * height; unsigned int orig_size = height*num_y; unsigned int volume = size*num_y; for( int n_b = 0; n_b < num_batch; ++n_b ){ for( int n_y = 0; n_y < num_y; ++n_y ){ // unsigned int index_in = n * size + width * yIndex + xIndex; unsigned int index_in = n_b * volume + n_y * size + width * yIndex + xIndex; unsigned int index_block = threadIdx.y * (BLOCK_DIM+1) + threadIdx.x; float2 in = idata[index_in]; blockx[index_block] = in.x; blocky[index_block] = in.y; unsigned int index_transpose = threadIdx.x * (BLOCK_DIM+1) + threadIdx.y; // unsigned int index_out = n * size + height * (xBlock + threadIdx.y) + yBlock + threadIdx.x; unsigned int index_out = n_b * volume + n_y * height + orig_size * xBlock + yBlock + orig_size * threadIdx.y + threadIdx.x; __syncthreads(); float2 out = make_float2( blockx[index_transpose], 
blocky[index_transpose] ); odata[index_out] = out; } } } /* __global__ void FFT_permute_kernel( cuFloatComplex* data_in, cuFloatComplex* data_out, uint4 dim, unsigned int num_shifts ) { //This is the current pixel number unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; // Do nothing if index is out of range if( idx < prod(dim) ){ uint4 new_dim; uint4 co = idx_to_co(idx, dim); co = shift_down( co, num_shifts ); new_dim = shift_down( dim, num_shifts ); data_out[co_to_idx(co,new_dim)] = data_in[idx]; } } */ __global__ void FFT_permute_kernel( cuFloatComplex* data_in, cuFloatComplex* data_out, uint4 dim, unsigned int num_shifts ) { //This is the current pixel number unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; // Do nothing if index is out of range if( idx < prod(dim) ){ uint4 new_dim = shift_down( dim, num_shifts ); uint4 co = idx_to_co(idx, new_dim); co = shift_up( co, num_shifts ); // data_out[idx] = data_in[co_to_idx(co,dim)]; data_out[idx] = tex1Dfetch( permute_tex, co_to_idx(co,dim) ); } } void prepare_xy_transpose( batchfftHandle *plan ) { plan->transpose_grid.x = plan->nx / BLOCK_DIM; plan->transpose_grid.y = plan->ny / BLOCK_DIM; plan->transpose_grid.z = 1; plan->transpose_back_grid.x = plan->ny / BLOCK_DIM; plan->transpose_back_grid.y = plan->nx / BLOCK_DIM; plan->transpose_back_grid.z = 1; } /* void prepare_yz_transpose( batchfftHandle *plan ) { plan->transpose_grid.x = plan->ny / BLOCK_DIM; plan->transpose_grid.y = plan->nz / BLOCK_DIM; plan->transpose_grid.z = 1; plan->transpose_back_grid.x = plan->nz / BLOCK_DIM; plan->transpose_back_grid.y = plan->ny / BLOCK_DIM; plan->transpose_back_grid.z = 1; } */ void prepare_xz_transpose( batchfftHandle *plan ) { plan->transpose_grid.x = plan->nx / BLOCK_DIM; plan->transpose_grid.y = plan->nz / BLOCK_DIM; plan->transpose_grid.z = 1; plan->transpose_back_grid.x = plan->nz / BLOCK_DIM; plan->transpose_back_grid.y = plan->nx / BLOCK_DIM; plan->transpose_back_grid.z = 1; }
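// Minimal usage sketch for the interface above (an illustration, not code from the
// mr_recon project): it assumes batchfft.hcu declares batchfftHandle as used in this
// file, relies on the includes already pulled in at the top of this translation unit
// (hip/hip_runtime.h, hipfft.h, batchfft.hcu), and requires nx and ny to be multiples
// of BLOCK_DIM. Error handling is reduced to early returns.
static int run_batched_fft2d_example()
{
    const int nx = 64, ny = 64, batch = 8;

    float2* data = NULL;
    if (hipMalloc((void**)&data, (size_t)nx * ny * batch * sizeof(float2)) != hipSuccess)
        return -1;
    // ... fill `data` with `batch` tightly packed nx-by-ny complex images ...

    batchfftHandle plan;
    if (batchfftPlan2d(&plan, nx, ny, HIPFFT_C2C, batch) != HIPFFT_SUCCESS)
    {
        hipFree(data);
        return -1;
    }

    // In-place forward transform of every image in the batch.
    if (batchfftExecute(plan, data, data, HIPFFT_FORWARD) != HIPFFT_SUCCESS)
    {
        batchfftDestroy(&plan);
        hipFree(data);
        return -1;
    }

    batchfftDestroy(&plan);
    hipFree(data);
    return 0;
}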
4b2e8ab4fbbc5e2a193bc7533b6580ebc6216a83.cu
/** * @file * Implementation of batched 2D FFTs with CUDA * * @author Jim Hardwick * * @note * This code was posted to nVidia's public CUDA forum as thanks to the CUDA * forum membership for their assistance. No copyright was asserted and no * license terms were imposed in the post. The post can be found at * http://forums.nvidia.com/index.php?showtopic=34241 */ /** * @note * Modified and extended to suit the mr_recon project... */ #include <cufft.h> #include "batchfft.hcu" #include "uint_util.hcu" #define BLOCK_DIM 16 ///< Thread block dimension. (TSS: I think think should be 'half_warp_size' "instead"?) texture<float2, 1> permute_tex; // Choose which "3D" transpose void prepare_xy_transpose( batchfftHandle *plan ); //void prepare_yz_transpose( batchfftHandle *plan ); void prepare_xz_transpose( batchfftHandle *plan ); /** * Fast matrix transpose kernel * * Uses shared memory to coalesce global memory reads and writes, improving performance. * * @param idata Input batch of matrices * @param odata Output buffer for transposed batch of matrices, must be different than idata * @param width Width of each matrix, must be a multiple of BLOCK_DIM * @param height Height of each matrix, must be a multiple of BLOCK_DIM * @param num Matrices in the batch */ __global__ void transpose_xy(float2* idata, float2* odata, int width, int height, int num); __global__ void transpose_xz(float2* idata, float2* odata, int width, int height, int num_y, int num_batch); __global__ void transpose_zx(float2* idata, float2* odata, int width, int height, int num_y, int num_batch); // Permute kernel ("3D transpose") __global__ void FFT_permute_kernel( cuFloatComplex* data_in, cuFloatComplex* data_out, uint4 dim, unsigned int num_shifts ); //////////////////////////////////////////////////////////////////////////////// cufftResult batchfftPlan2d(batchfftHandle* plan, int nx, int ny, cufftType type, int batch) { if(type != CUFFT_C2C) return CUFFT_INVALID_TYPE; if((nx % BLOCK_DIM) != 0) return CUFFT_INVALID_SIZE; if((ny % BLOCK_DIM) != 0) return CUFFT_INVALID_SIZE; // Swap nx and ny so they correspoind to the 2D CUFFT API. plan->nx = ny; plan->ny = nx; plan->nz = 1; plan->type = type; plan->batch = batch; plan->transpose_threads.x = BLOCK_DIM; plan->transpose_threads.y = BLOCK_DIM; plan->transpose_threads.z = 1; plan->transpose_grid.x = plan->nx / BLOCK_DIM; plan->transpose_grid.y = plan->ny / BLOCK_DIM; plan->transpose_grid.z = 1; plan->transpose_back_grid.x = plan->ny / BLOCK_DIM; plan->transpose_back_grid.y = plan->nx / BLOCK_DIM; plan->transpose_back_grid.z = 1; cufftResult ret = CUFFT_SUCCESS; cudaError_t cudaret = cudaSuccess; cudaret = cudaMalloc(&(plan->temp), plan->nx * plan->ny * plan->batch * sizeof(float2)); if(cudaret != cudaSuccess) return CUFFT_ALLOC_FAILED; ret = cufftPlan1d(&(plan->rowplan), plan->nx, plan->type, plan->ny * plan->batch); if(ret != CUFFT_SUCCESS) { cudaret = cudaFree(plan->temp); if(cudaret != cudaSuccess){ printf("\n'cudaFree' failed: %s. Quitting.", cudaGetErrorString(cudaGetLastError())); fflush(stdout); exit(1); } plan->temp = NULL; return ret; } ret = cufftPlan1d(&(plan->colplan), plan->ny, plan->type, plan->nx * plan->batch); if(ret != CUFFT_SUCCESS) { cudaret = cudaFree(plan->temp); if(cudaret != cudaSuccess){ printf("\n'cudaFree' failed: %s. 
Quitting.", cudaGetErrorString(cudaGetLastError())); fflush(stdout); } plan->temp = NULL; printf("\n 'cufftPlan1d' failed!."); fflush(stdout); cufftResult res2 = cufftDestroy(plan->rowplan); if(res2 != CUFFT_SUCCESS) { printf("\n'cufftDestroy' failed: %s. Quitting.", cudaGetErrorString(cudaGetLastError())); fflush(stdout); exit(1); } return ret; } return CUFFT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// cufftResult batchfftPlan3d(batchfftHandle* plan, int nx, int ny, int nz, cufftType type, int batch) { if(type != CUFFT_C2C) return CUFFT_INVALID_TYPE; if((nx % BLOCK_DIM) != 0){ printf("\nbatchFFT: all dimensions (current: %d) must be a multiple of the BLOCK_DIM (%d)", nx, BLOCK_DIM ); return CUFFT_INVALID_SIZE; } if((ny % BLOCK_DIM) != 0){ printf("\nbatchFFT: all dimensions (current: %d) must be a multiple of the BLOCK_DIM (%d)", ny, BLOCK_DIM ); return CUFFT_INVALID_SIZE; } if((nz % BLOCK_DIM) != 0){ printf("\nbatchFFT: all dimensions (current: %d) must be a multiple of the BLOCK_DIM (%d)", nz, BLOCK_DIM ); return CUFFT_INVALID_SIZE; } // Swap nx, ny, and nz so they correspoind to the 3D CUFFT API. plan->nx = nz; plan->ny = ny; plan->nz = nx; plan->type = type; plan->batch = batch; plan->transpose_threads.x = BLOCK_DIM; plan->transpose_threads.y = BLOCK_DIM; plan->transpose_threads.z = 1; cufftResult ret = CUFFT_SUCCESS; cudaError_t cudaret = cudaSuccess; cudaret = cudaMalloc(&(plan->temp), plan->nx * plan->ny * plan->nz * plan->batch * sizeof(float2)); if(cudaret != cudaSuccess) return CUFFT_ALLOC_FAILED; ret = cufftPlan1d(&(plan->rowplan), plan->nx, plan->type, plan->ny * plan->nz * plan->batch); if(ret != CUFFT_SUCCESS) { cudaret = cudaFree(plan->temp); if(cudaret != cudaSuccess){ printf("\n'cudaFree' failed: %s. Quitting.", cudaGetErrorString(cudaGetLastError())); fflush(stdout); exit(1); } plan->temp = NULL; return ret; } ret = cufftPlan1d(&(plan->colplan), plan->ny, plan->type, plan->nx * plan->nz * plan->batch); if(ret != CUFFT_SUCCESS) { cudaret = cudaFree(plan->temp); if(cudaret != cudaSuccess){ printf("\n'cudaFree' failed: %s. Quitting.", cudaGetErrorString(cudaGetLastError())); fflush(stdout); exit(1); } printf("\n 'cufftPlan1d' failed!."); fflush(stdout); plan->temp = NULL; cufftResult res2 = cufftDestroy(plan->rowplan); if(res2 != CUFFT_SUCCESS) { printf("\n'cufftDestroy' failed: %s. Quitting.", cudaGetErrorString(cudaGetLastError())); fflush(stdout); exit(1); } return ret; } ret = cufftPlan1d(&(plan->layerplan), plan->nz, plan->type, plan->nx * plan->ny * plan->batch); if(ret != CUFFT_SUCCESS) { cudaret = cudaFree(plan->temp); if(cudaret != cudaSuccess){ printf("\n'cudaFree' failed: %s. Quitting.", cudaGetErrorString(cudaGetLastError())); fflush(stdout); exit(1); } plan->temp = NULL; cufftResult res2 = cufftDestroy(plan->rowplan); if(res2 != CUFFT_SUCCESS) { printf("\n'cufftDestroy' failed: %s. Quitting.", cudaGetErrorString(cudaGetLastError())); fflush(stdout); exit(1); } res2 = cufftDestroy(plan->colplan); if(res2 != CUFFT_SUCCESS) { printf("\n'cufftDestroy' failed: %s. Quitting.", cudaGetErrorString(cudaGetLastError())); fflush(stdout); exit(1); } return ret; } return CUFFT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// cufftResult batchfftDestroy(batchfftHandle* plan) { if(plan->temp != NULL) { cufftResult res = cufftDestroy(plan->rowplan); if(res != CUFFT_SUCCESS) { printf("\n'cufftDestroy' failed: %s. 
Quitting.", cudaGetErrorString(cudaGetLastError())); fflush(stdout); exit(1); } res = cufftDestroy(plan->colplan); if(res != CUFFT_SUCCESS) { printf("\n'cufftDestroy' failed: %s. Quitting.", cudaGetErrorString(cudaGetLastError())); fflush(stdout); exit(1); } if( plan->nz != 1 ){ res = cufftDestroy(plan->layerplan); if(res != CUFFT_SUCCESS) { printf("\n'cufftDestroy' failed: %s. Quitting.", cudaGetErrorString(cudaGetLastError())); fflush(stdout); exit(1); } } cudaError_t cudaret = cudaFree(plan->temp); if(cudaret != cudaSuccess){ printf("\n'cudaFree' failed: %s. Quitting.", cudaGetErrorString(cudaGetLastError())); fflush(stdout); exit(1); } plan->temp = NULL; } return CUFFT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// cufftResult batchfftExecute(batchfftHandle plan, void* idata, void* odata, int sign) { if( plan.nz == 1 ) { // 2D FFT cufftResult cufftret = CUFFT_SUCCESS; cudaError_t cudaret = cudaSuccess; // Transform rows cufftret = cufftExecC2C(plan.rowplan, (cuFloatComplex*)idata, (cuFloatComplex*)odata, sign); if(cufftret != CUFFT_SUCCESS) return cufftret; // Transpose transpose_xy<<< plan.transpose_grid, plan.transpose_threads >>>((float2*)odata, (float2*)plan.temp, plan.nx, plan.ny, plan.batch); cudaret = cudaGetLastError(); if(cudaret != cudaSuccess) return CUFFT_EXEC_FAILED; // Transform columns cufftret = cufftExecC2C(plan.colplan, (cuFloatComplex*)plan.temp, (cuFloatComplex*)plan.temp, sign); if(cufftret != CUFFT_SUCCESS) return cufftret; // Transpose back transpose_xy<<< plan.transpose_back_grid, plan.transpose_threads >>>((float2*)plan.temp, (float2*)odata, plan.ny, plan.nx, plan.batch); cudaret = cudaGetLastError(); if(cudaret != cudaSuccess) return CUFFT_EXEC_FAILED; } else{ // 3D FFT cufftResult cufftret = CUFFT_SUCCESS; cudaError_t cudaret = cudaSuccess; dim3 blockDim(512,1, 1); dim3 gridDim((int)ceil((double)(plan.nx*plan.ny*plan.nz*plan.batch)/512.0),1,1); const uint4 dims = make_uint4( plan.nx, plan.ny, plan.nz, plan.batch ); const cudaChannelFormatDesc desc_float2 = cudaCreateChannelDesc<float2>(); // Transform rows cufftret = cufftExecC2C(plan.rowplan, (cuFloatComplex*)idata, (cuFloatComplex*)odata, sign); if(cufftret != CUFFT_SUCCESS) return cufftret; // fft_permute_radix4( plan.nx, dims, 0, plan.ny*plan.nz*plan.batch, idata, odata, sign ); /* // Permute once "to the left" cudaBindTexture( 0, &permute_tex, (float2*) odata, &desc_float2, prod(dims)*sizeof(float2)); FFT_permute_kernel<<< gridDim, blockDim >>>( (cuFloatComplex*) odata, (cuFloatComplex*) plan.temp, make_uint4(plan.nx, plan.ny, plan.nz, plan.batch), 1 ); cudaUnbindTexture( permute_tex ); cudaret = cudaGetLastError(); if(cudaret != cudaSuccess) return CUFFT_EXEC_FAILED; */ // Transpose prepare_xy_transpose( &plan ); transpose_xy<<< plan.transpose_grid, plan.transpose_threads >>>((float2*)odata, (float2*)plan.temp, plan.nx, plan.ny, plan.nz*plan.batch); cudaret = cudaGetLastError(); if(cudaret != cudaSuccess) return CUFFT_EXEC_FAILED; // Transform columns cufftret = cufftExecC2C(plan.colplan, (cuFloatComplex*)plan.temp, (cuFloatComplex*) plan.temp, sign); if(cufftret != CUFFT_SUCCESS) return cufftret; // Transpose back prepare_xy_transpose( &plan ); transpose_xy<<< plan.transpose_back_grid, plan.transpose_threads >>>((float2*)plan.temp, (float2*)odata, plan.ny, plan.nx, plan.nz*plan.batch); cudaret = cudaGetLastError(); if(cudaret != cudaSuccess) return CUFFT_EXEC_FAILED; // fft_permute_radix4( plan.ny, dims, 1, plan.nx*plan.nz*plan.batch, odata, odata, sign ); 
/* // Permute once "to the left" cudaBindTexture( 0, &permute_tex, (float2*) plan.temp, &desc_float2, prod(dims)*sizeof(float2)); FFT_permute_kernel<<< gridDim, blockDim >>>( (cuFloatComplex*) plan.temp, (cuFloatComplex*) odata, make_uint4(plan.ny, plan.nz, plan.batch, plan.nx), 1 ); cudaUnbindTexture( permute_tex ); cudaret = cudaGetLastError(); if(cudaret != cudaSuccess) return CUFFT_EXEC_FAILED; */ // Transpose prepare_xz_transpose( &plan ); transpose_xz<<< plan.transpose_grid, plan.transpose_threads >>>((float2*)odata, (float2*)plan.temp, plan.nx, plan.nz, plan.ny, plan.batch); cudaret = cudaGetLastError(); if(cudaret != cudaSuccess) return CUFFT_EXEC_FAILED; // Transform layers cufftret = cufftExecC2C(plan.layerplan, (cuFloatComplex*) plan.temp, (cuFloatComplex*)plan.temp, sign); if(cufftret != CUFFT_SUCCESS) return cufftret; // Transpose back prepare_xz_transpose( &plan ); transpose_zx<<< plan.transpose_back_grid, plan.transpose_threads >>>((float2*)plan.temp, (float2*)odata, plan.nz, plan.nx, plan.ny, plan.batch); cudaret = cudaGetLastError(); if(cudaret != cudaSuccess) return CUFFT_EXEC_FAILED; // fft_permute_radix2( plan.nz, dims, 2, plan.nx*plan.ny*plan.batch, odata, odata, sign ); /* // Permute one final time to complete the loop cudaBindTexture( 0, &permute_tex, (float2*) plan.temp, &desc_float2, prod(dims)*sizeof(float2)); FFT_permute_kernel<<< gridDim, blockDim >>>( (cuFloatComplex*) plan.temp, (cuFloatComplex*) odata, make_uint4(plan.nz, plan.batch, plan.nx, plan.ny), 2 ); cudaUnbindTexture( permute_tex ); cudaret = cudaGetLastError(); if(cudaret != cudaSuccess) return CUFFT_EXEC_FAILED; */ } return CUFFT_SUCCESS; } /////////////////////////////////////////////////////////////////////////////// __global__ void transpose_xy(float2* idata, float2* odata, int width, int height, int num) { // To prevent shared memory bank confilcts: // - Load each component into a different array. Since the array size is a // multiple of the number of banks (16), each thread reads x and y from // the same bank. If a single float2 array is used, thread n would read // x and y from banks n and n+1, and thread n+8 would read values from the // same banks - causing a bank conflict. // - Use BLOCK_DIM+1 as the x size of the array. This way each row of the // array starts in a different bank - so reading from shared memory // doesn't cause bank conflicts when writing the transpose out to global // memory. 
__shared__ float blockx[(BLOCK_DIM+1)*BLOCK_DIM]; __shared__ float blocky[(BLOCK_DIM+1)*BLOCK_DIM]; unsigned int xBlock = BLOCK_DIM * blockIdx.x; unsigned int yBlock = BLOCK_DIM * blockIdx.y; unsigned int xIndex = xBlock + threadIdx.x; unsigned int yIndex = yBlock + threadIdx.y; unsigned int size = width * height; for(int n = 0; n < num; ++n){ unsigned int index_in = n * size + width * yIndex + xIndex; unsigned int index_block = threadIdx.y * (BLOCK_DIM+1) + threadIdx.x; float2 in = idata[index_in]; blockx[index_block] = in.x; blocky[index_block] = in.y; unsigned int index_transpose = threadIdx.x * (BLOCK_DIM+1) + threadIdx.y; unsigned int index_out = n * size + height * (xBlock + threadIdx.y) + yBlock + threadIdx.x; __syncthreads(); float2 out = make_float2( blockx[index_transpose], blocky[index_transpose] ); odata[index_out] = out; } } __global__ void transpose_xz(float2* idata, float2* odata, int width, int height, int num_y, int num_batch) { // As above, but the 'y' direction elements are now a slice rather than a line apart __shared__ float blockx[(BLOCK_DIM+1)*BLOCK_DIM]; __shared__ float blocky[(BLOCK_DIM+1)*BLOCK_DIM]; unsigned int xBlock = BLOCK_DIM * blockIdx.x; unsigned int yBlock = BLOCK_DIM * blockIdx.y; unsigned int xIndex = xBlock + threadIdx.x; unsigned int yIndex = yBlock + threadIdx.y; unsigned int size = width * height; unsigned int orig_size = width*num_y; unsigned int volume = size*num_y; for( int n_b = 0; n_b < num_batch; ++n_b ){ for( int n_y = 0; n_y < num_y; ++n_y ){ // unsigned int index_in = n * size + width * yIndex + xIndex; unsigned int index_in = n_b * volume + n_y * width + orig_size * yIndex + xIndex; unsigned int index_block = threadIdx.y * (BLOCK_DIM+1) + threadIdx.x; float2 in = idata[index_in]; blockx[index_block] = in.x; blocky[index_block] = in.y; unsigned int index_transpose = threadIdx.x * (BLOCK_DIM+1) + threadIdx.y; // unsigned int index_out = n * size + height * (xBlock + threadIdx.y) + yBlock + threadIdx.x; unsigned int index_out = n_b * volume + n_y * size + height * (xBlock + threadIdx.y) + yBlock + threadIdx.x; __syncthreads(); float2 out = make_float2( blockx[index_transpose], blocky[index_transpose] ); odata[index_out] = out; } } } __global__ void transpose_zx(float2* idata, float2* odata, int width, int height, int num_y, int num_batch) { // As above, but the 'y' direction elements are now a slice rather than a line apart __shared__ float blockx[(BLOCK_DIM+1)*BLOCK_DIM]; __shared__ float blocky[(BLOCK_DIM+1)*BLOCK_DIM]; unsigned int xBlock = BLOCK_DIM * blockIdx.x; unsigned int yBlock = BLOCK_DIM * blockIdx.y; unsigned int xIndex = xBlock + threadIdx.x; unsigned int yIndex = yBlock + threadIdx.y; unsigned int size = width * height; unsigned int orig_size = height*num_y; unsigned int volume = size*num_y; for( int n_b = 0; n_b < num_batch; ++n_b ){ for( int n_y = 0; n_y < num_y; ++n_y ){ // unsigned int index_in = n * size + width * yIndex + xIndex; unsigned int index_in = n_b * volume + n_y * size + width * yIndex + xIndex; unsigned int index_block = threadIdx.y * (BLOCK_DIM+1) + threadIdx.x; float2 in = idata[index_in]; blockx[index_block] = in.x; blocky[index_block] = in.y; unsigned int index_transpose = threadIdx.x * (BLOCK_DIM+1) + threadIdx.y; // unsigned int index_out = n * size + height * (xBlock + threadIdx.y) + yBlock + threadIdx.x; unsigned int index_out = n_b * volume + n_y * height + orig_size * xBlock + yBlock + orig_size * threadIdx.y + threadIdx.x; __syncthreads(); float2 out = make_float2( blockx[index_transpose], 
blocky[index_transpose] ); odata[index_out] = out; } } } /* __global__ void FFT_permute_kernel( cuFloatComplex* data_in, cuFloatComplex* data_out, uint4 dim, unsigned int num_shifts ) { //This is the current pixel number unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; // Do nothing if index is out of range if( idx < prod(dim) ){ uint4 new_dim; uint4 co = idx_to_co(idx, dim); co = shift_down( co, num_shifts ); new_dim = shift_down( dim, num_shifts ); data_out[co_to_idx(co,new_dim)] = data_in[idx]; } } */ __global__ void FFT_permute_kernel( cuFloatComplex* data_in, cuFloatComplex* data_out, uint4 dim, unsigned int num_shifts ) { //This is the current pixel number unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; // Do nothing if index is out of range if( idx < prod(dim) ){ uint4 new_dim = shift_down( dim, num_shifts ); uint4 co = idx_to_co(idx, new_dim); co = shift_up( co, num_shifts ); // data_out[idx] = data_in[co_to_idx(co,dim)]; data_out[idx] = tex1Dfetch( permute_tex, co_to_idx(co,dim) ); } } void prepare_xy_transpose( batchfftHandle *plan ) { plan->transpose_grid.x = plan->nx / BLOCK_DIM; plan->transpose_grid.y = plan->ny / BLOCK_DIM; plan->transpose_grid.z = 1; plan->transpose_back_grid.x = plan->ny / BLOCK_DIM; plan->transpose_back_grid.y = plan->nx / BLOCK_DIM; plan->transpose_back_grid.z = 1; } /* void prepare_yz_transpose( batchfftHandle *plan ) { plan->transpose_grid.x = plan->ny / BLOCK_DIM; plan->transpose_grid.y = plan->nz / BLOCK_DIM; plan->transpose_grid.z = 1; plan->transpose_back_grid.x = plan->nz / BLOCK_DIM; plan->transpose_back_grid.y = plan->ny / BLOCK_DIM; plan->transpose_back_grid.z = 1; } */ void prepare_xz_transpose( batchfftHandle *plan ) { plan->transpose_grid.x = plan->nx / BLOCK_DIM; plan->transpose_grid.y = plan->nz / BLOCK_DIM; plan->transpose_grid.z = 1; plan->transpose_back_grid.x = plan->nz / BLOCK_DIM; plan->transpose_back_grid.y = plan->nx / BLOCK_DIM; plan->transpose_back_grid.z = 1; }
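// ---------------------------------------------------------------------------
// A minimal, self-contained sketch of the padded shared-memory transpose idea
// used by the kernels above: the tile gets one extra column so that reading a
// tile column touches BLOCK_DIM different banks instead of one. This is only
// an illustration -- the kernel name, the TILE_DIM constant, and the use of a
// single float2 tile (the original splits the .x/.y components into two float
// arrays) are simplifications, not part of the batchfft API.
#define TILE_DIM 16

__global__ void transpose_padded_sketch(const float2* in, float2* out,
                                        int width, int height)
{
    // +1 column of padding: consecutive tile rows start in different banks,
    // so the column-wise reads below are conflict free.
    __shared__ float2 tile[TILE_DIM][TILE_DIM + 1];

    unsigned int x = blockIdx.x * TILE_DIM + threadIdx.x;
    unsigned int y = blockIdx.y * TILE_DIM + threadIdx.y;
    if (x < width && y < height)
        tile[threadIdx.y][threadIdx.x] = in[y * width + x];

    __syncthreads();

    // Swap block coordinates for the output tile and swap thread coordinates
    // when reading shared memory, so the global writes stay coalesced.
    unsigned int tx = blockIdx.y * TILE_DIM + threadIdx.x;
    unsigned int ty = blockIdx.x * TILE_DIM + threadIdx.y;
    if (tx < height && ty < width)
        out[ty * height + tx] = tile[threadIdx.x][threadIdx.y];
}
// ---------------------------------------------------------------------------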
8dda867bcf0f19ebe310e1afc7c4737282fb3173.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHDeviceUtils.cuh> #include <vector> #include <iostream> int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } // boxes is a N x 5 tensor at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) { using scalar_t = float; AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); auto scores = boxes.select(1, 4); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto boxes_sorted = boxes.index_select(0, order_t); int boxes_num = boxes.size(0); const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock); scalar_t* boxes_dev = boxes_sorted.data<scalar_t>(); THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState unsigned long long* mask_dev = NULL; //THCudaCheck(THCudaMalloc(state, (void**) &mask_dev, // boxes_num * col_blocks * sizeof(unsigned long long))); mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long)); dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock), THCCeilDiv(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); THCudaCheck(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, hipMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); 
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data<int64_t>(); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } THCudaFree(state, mask_dev); // TODO improve this part return std::get<0>(order_t.index({ keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to( order_t.device(), keep.scalar_type()) }).sort(0, false)); }
8dda867bcf0f19ebe310e1afc7c4737282fb3173.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCDeviceUtils.cuh> #include <vector> #include <iostream> int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } // boxes is a N x 5 tensor at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) { using scalar_t = float; AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); auto scores = boxes.select(1, 4); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto boxes_sorted = boxes.index_select(0, order_t); int boxes_num = boxes.size(0); const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock); scalar_t* boxes_dev = boxes_sorted.data<scalar_t>(); THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState unsigned long long* mask_dev = NULL; //THCudaCheck(THCudaMalloc(state, (void**) &mask_dev, // boxes_num * col_blocks * sizeof(unsigned long long))); mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long)); dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock), THCCeilDiv(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); THCudaCheck(cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, cudaMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({boxes_num}, 
boxes.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data<int64_t>(); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } THCudaFree(state, mask_dev); // TODO improve this part return std::get<0>(order_t.index({ keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to( order_t.device(), keep.scalar_type()) }).sort(0, false)); }
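// ---------------------------------------------------------------------------
// Host-side reference for the suppression logic above, kept next to the kernel
// as documentation: bit j of 64-bit word b in row i of the mask means box i
// suppresses box (b * threadsPerBlock + j), and the CPU loop keeps a box only
// if no higher-scoring kept box has marked it. The helper names (iou_ref,
// nms_ref) are illustrative only; the 5-float row layout (x1, y1, x2, y2,
// score) and the "+1" pixel convention match devIoU.
#include <algorithm>
#include <vector>

inline float iou_ref(const float* a, const float* b) {
  float left = std::max(a[0], b[0]), right = std::min(a[2], b[2]);
  float top = std::max(a[1], b[1]), bottom = std::min(a[3], b[3]);
  float w = std::max(right - left + 1, 0.f), h = std::max(bottom - top + 1, 0.f);
  float inter = w * h;
  float sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  float sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return inter / (sa + sb - inter);
}

// boxes: n rows of 5 floats, already sorted by descending score.
inline std::vector<int> nms_ref(const float* boxes, int n, float thresh) {
  std::vector<int> keep;
  std::vector<bool> suppressed(n, false);
  for (int i = 0; i < n; ++i) {
    if (suppressed[i]) continue;
    keep.push_back(i);
    for (int j = i + 1; j < n; ++j) {
      if (iou_ref(boxes + i * 5, boxes + j * 5) > thresh) suppressed[j] = true;
    }
  }
  return keep;
}
// ---------------------------------------------------------------------------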
f9e3ecb801bd7b12620a04ecb0b9302f1b669e71.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cmath> #include <vector> #include "caffe/layer.hpp" #include "caffe/layers/lstm_layer.hpp" namespace caffe { template <typename Dtype> __device__ Dtype sigmoid(const Dtype x) { return Dtype(1) / (Dtype(1) + exp(-x)); } template <typename Dtype> __device__ Dtype tanh(const Dtype x) { return Dtype(2) * sigmoid(Dtype(2) * x) - Dtype(1); } template <typename Dtype> __global__ void LSTMActsForward(const int nthreads, const int dim, const Dtype* X, Dtype* X_acts) { CUDA_KERNEL_LOOP(index, nthreads) { const int x_dim = 4 * dim; const int d = index % x_dim; if (d < 3 * dim) { X_acts[index] = sigmoid(X[index]); } else { X_acts[index] = tanh(X[index]); } } } template <typename Dtype> __global__ void LSTMUnitForward(const int nthreads, const int dim, const Dtype* C_prev, const Dtype* X, const Dtype* cont, Dtype* C, Dtype* H) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / dim; const int d = index % dim; const Dtype* X_offset = X + 4 * dim * n; const Dtype i = X_offset[d]; const Dtype f = X_offset[1 * dim + d]; const Dtype o = X_offset[2 * dim + d]; const Dtype g = X_offset[3 * dim + d]; const Dtype c_prev = C_prev[index]; Dtype lcont=1; if(cont) lcont=cont[n]; const Dtype c = lcont * f * c_prev + i * g; C[index] = c; const Dtype tanh_c = tanh(c); H[index] = o * tanh_c; } } template <typename Dtype> void LSTMUnitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = top[1]->count(); const Dtype* C_prev = bottom[0]->gpu_data(); const Dtype* X = bottom[1]->gpu_data(); const Dtype* cont = NULL; if(bottom.size()>=3) cont=bottom[2]->gpu_data(); Dtype* X_acts = X_acts_.mutable_gpu_data(); Dtype* C = top[0]->mutable_gpu_data(); Dtype* H = top[1]->mutable_gpu_data(); const int X_count = bottom[1]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( LSTMActsForward<Dtype>), dim3(CAFFE_GET_BLOCKS(X_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, X_count, hidden_dim_, X, X_acts); CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( LSTMUnitForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, hidden_dim_, C_prev, X_acts, cont, C, H); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void LSTMUnitBackward(const int nthreads, const int dim, const Dtype* C_prev, const Dtype* X, const Dtype* C, const Dtype* H, const Dtype* cont, const Dtype* C_diff, const Dtype* H_diff, Dtype* C_prev_diff, Dtype* X_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / dim; const int d = index % dim; const Dtype* X_offset = X + 4 * dim * n; const Dtype i = X_offset[d]; const Dtype f = X_offset[1 * dim + d]; const Dtype o = X_offset[2 * dim + d]; const Dtype g = X_offset[3 * dim + d]; const Dtype c_prev = C_prev[index]; const Dtype c = C[index]; const Dtype tanh_c = tanh(c); Dtype* c_prev_diff = C_prev_diff + index; Dtype* X_diff_offset = X_diff + 4 * dim * n; Dtype* i_diff = X_diff_offset + d; Dtype* f_diff = X_diff_offset + 1 * dim + d; Dtype* o_diff = X_diff_offset + 2 * dim + d; Dtype* g_diff = X_diff_offset + 3 * dim + d; const Dtype c_term_diff = C_diff[index] + H_diff[index] * o * (1 - tanh_c * tanh_c); Dtype cont_n = 1; if(cont) cont_n=cont[n]; *c_prev_diff = cont_n * c_term_diff * f; *i_diff = c_term_diff * g; *f_diff = cont_n * c_term_diff * c_prev; *o_diff = H_diff[index] * tanh_c; *g_diff = c_term_diff * i; } } template 
<typename Dtype> __global__ void LSTMActsBackward(const int nthreads, const int dim, const Dtype* X_acts, const Dtype* X_acts_diff, Dtype* X_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int x_dim = 4 * dim; const int d = index % x_dim; const Dtype X_act = X_acts[index]; if (d < 3 * dim) { X_diff[index] = X_acts_diff[index] * X_act * (Dtype(1) - X_act); } else { X_diff[index] = X_acts_diff[index] * (Dtype(1) - X_act * X_act); } } } template <typename Dtype> void LSTMUnitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if(bottom.size()>=3) { CHECK(!propagate_down[2]) << "Cannot backpropagate to sequence indicators."; } if (!propagate_down[0] && !propagate_down[1]) { return; } const int count = top[1]->count(); const Dtype* C_prev = bottom[0]->gpu_data(); const Dtype* X_acts = X_acts_.gpu_data(); const Dtype* cont = NULL; if(bottom.size()>=3) cont=bottom[2]->gpu_data(); const Dtype* C = top[0]->gpu_data(); const Dtype* H = top[1]->gpu_data(); const Dtype* C_diff = top[0]->gpu_diff(); const Dtype* H_diff = top[1]->gpu_diff(); Dtype* C_prev_diff = bottom[0]->mutable_gpu_diff(); Dtype* X_acts_diff = X_acts_.mutable_gpu_diff(); LSTMUnitBackward<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, hidden_dim_, C_prev, X_acts, C, H, cont, C_diff, H_diff, C_prev_diff, X_acts_diff); CUDA_POST_KERNEL_CHECK; const int X_count = bottom[1]->count(); Dtype* X_diff = bottom[1]->mutable_gpu_diff(); LSTMActsBackward<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(X_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, X_count, hidden_dim_, X_acts, X_acts_diff, X_diff); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(LSTMUnitLayer); } // namespace caffe
f9e3ecb801bd7b12620a04ecb0b9302f1b669e71.cu
#include <algorithm> #include <cmath> #include <vector> #include "caffe/layer.hpp" #include "caffe/layers/lstm_layer.hpp" namespace caffe { template <typename Dtype> __device__ Dtype sigmoid(const Dtype x) { return Dtype(1) / (Dtype(1) + exp(-x)); } template <typename Dtype> __device__ Dtype tanh(const Dtype x) { return Dtype(2) * sigmoid(Dtype(2) * x) - Dtype(1); } template <typename Dtype> __global__ void LSTMActsForward(const int nthreads, const int dim, const Dtype* X, Dtype* X_acts) { CUDA_KERNEL_LOOP(index, nthreads) { const int x_dim = 4 * dim; const int d = index % x_dim; if (d < 3 * dim) { X_acts[index] = sigmoid(X[index]); } else { X_acts[index] = tanh(X[index]); } } } template <typename Dtype> __global__ void LSTMUnitForward(const int nthreads, const int dim, const Dtype* C_prev, const Dtype* X, const Dtype* cont, Dtype* C, Dtype* H) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / dim; const int d = index % dim; const Dtype* X_offset = X + 4 * dim * n; const Dtype i = X_offset[d]; const Dtype f = X_offset[1 * dim + d]; const Dtype o = X_offset[2 * dim + d]; const Dtype g = X_offset[3 * dim + d]; const Dtype c_prev = C_prev[index]; Dtype lcont=1; if(cont) lcont=cont[n]; const Dtype c = lcont * f * c_prev + i * g; C[index] = c; const Dtype tanh_c = tanh(c); H[index] = o * tanh_c; } } template <typename Dtype> void LSTMUnitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = top[1]->count(); const Dtype* C_prev = bottom[0]->gpu_data(); const Dtype* X = bottom[1]->gpu_data(); const Dtype* cont = NULL; if(bottom.size()>=3) cont=bottom[2]->gpu_data(); Dtype* X_acts = X_acts_.mutable_gpu_data(); Dtype* C = top[0]->mutable_gpu_data(); Dtype* H = top[1]->mutable_gpu_data(); const int X_count = bottom[1]->count(); // NOLINT_NEXT_LINE(whitespace/operators) LSTMActsForward<Dtype><<<CAFFE_GET_BLOCKS(X_count), CAFFE_CUDA_NUM_THREADS>>>( X_count, hidden_dim_, X, X_acts); CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) LSTMUnitForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, hidden_dim_, C_prev, X_acts, cont, C, H); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void LSTMUnitBackward(const int nthreads, const int dim, const Dtype* C_prev, const Dtype* X, const Dtype* C, const Dtype* H, const Dtype* cont, const Dtype* C_diff, const Dtype* H_diff, Dtype* C_prev_diff, Dtype* X_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / dim; const int d = index % dim; const Dtype* X_offset = X + 4 * dim * n; const Dtype i = X_offset[d]; const Dtype f = X_offset[1 * dim + d]; const Dtype o = X_offset[2 * dim + d]; const Dtype g = X_offset[3 * dim + d]; const Dtype c_prev = C_prev[index]; const Dtype c = C[index]; const Dtype tanh_c = tanh(c); Dtype* c_prev_diff = C_prev_diff + index; Dtype* X_diff_offset = X_diff + 4 * dim * n; Dtype* i_diff = X_diff_offset + d; Dtype* f_diff = X_diff_offset + 1 * dim + d; Dtype* o_diff = X_diff_offset + 2 * dim + d; Dtype* g_diff = X_diff_offset + 3 * dim + d; const Dtype c_term_diff = C_diff[index] + H_diff[index] * o * (1 - tanh_c * tanh_c); Dtype cont_n = 1; if(cont) cont_n=cont[n]; *c_prev_diff = cont_n * c_term_diff * f; *i_diff = c_term_diff * g; *f_diff = cont_n * c_term_diff * c_prev; *o_diff = H_diff[index] * tanh_c; *g_diff = c_term_diff * i; } } template <typename Dtype> __global__ void LSTMActsBackward(const int nthreads, const int dim, const Dtype* X_acts, const Dtype* X_acts_diff, Dtype* X_diff) { 
CUDA_KERNEL_LOOP(index, nthreads) { const int x_dim = 4 * dim; const int d = index % x_dim; const Dtype X_act = X_acts[index]; if (d < 3 * dim) { X_diff[index] = X_acts_diff[index] * X_act * (Dtype(1) - X_act); } else { X_diff[index] = X_acts_diff[index] * (Dtype(1) - X_act * X_act); } } } template <typename Dtype> void LSTMUnitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if(bottom.size()>=3) { CHECK(!propagate_down[2]) << "Cannot backpropagate to sequence indicators."; } if (!propagate_down[0] && !propagate_down[1]) { return; } const int count = top[1]->count(); const Dtype* C_prev = bottom[0]->gpu_data(); const Dtype* X_acts = X_acts_.gpu_data(); const Dtype* cont = NULL; if(bottom.size()>=3) cont=bottom[2]->gpu_data(); const Dtype* C = top[0]->gpu_data(); const Dtype* H = top[1]->gpu_data(); const Dtype* C_diff = top[0]->gpu_diff(); const Dtype* H_diff = top[1]->gpu_diff(); Dtype* C_prev_diff = bottom[0]->mutable_gpu_diff(); Dtype* X_acts_diff = X_acts_.mutable_gpu_diff(); LSTMUnitBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, hidden_dim_, C_prev, X_acts, C, H, cont, C_diff, H_diff, C_prev_diff, X_acts_diff); CUDA_POST_KERNEL_CHECK; const int X_count = bottom[1]->count(); Dtype* X_diff = bottom[1]->mutable_gpu_diff(); LSTMActsBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(X_count), CAFFE_CUDA_NUM_THREADS>>>( X_count, hidden_dim_, X_acts, X_acts_diff, X_diff); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(LSTMUnitLayer); } // namespace caffe
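// ---------------------------------------------------------------------------
// Scalar reference for the forward pass computed by LSTMActsForward and
// LSTMUnitForward above: per hidden unit,
//   i = sigmoid(x_i), f = sigmoid(x_f), o = sigmoid(x_o), g = tanh(x_g)
//   c = cont * f * c_prev + i * g
//   h = o * tanh(c)
// where cont is the sequence-continuation indicator (treated as 1 when the
// optional indicator blob is absent). The struct and function names below are
// illustrative only, not part of the layer's API.
#include <cmath>

struct LstmUnitRef {
  static float sigmoid_ref(float x) { return 1.f / (1.f + std::exp(-x)); }

  // x_i, x_f, x_o, x_g are the raw pre-activation gate inputs for one unit.
  static void forward(float c_prev, float x_i, float x_f, float x_o, float x_g,
                      float cont, float* c_out, float* h_out) {
    float i = sigmoid_ref(x_i);
    float f = sigmoid_ref(x_f);
    float o = sigmoid_ref(x_o);
    float g = std::tanh(x_g);
    float c = cont * f * c_prev + i * g;
    *c_out = c;
    *h_out = o * std::tanh(c);
  }
};
// ---------------------------------------------------------------------------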
2711cf2ebb42d0533ecbbd223fbc0ee97d4cee52.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2017-2019 XGBoost contributors */ #include <thrust/copy.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/reduce.h> #include <xgboost/tree_updater.h> #include <algorithm> #include <cmath> #include <memory> #include <limits> #include <queue> #include <utility> #include <vector> #include "../common/common.h" #include "../common/compressed_iterator.h" #include "../common/device_helpers.cuh" #include "../common/hist_util.h" #include "../common/host_device_vector.h" #include "../common/timer.h" #include "../common/span.h" #include "param.h" #include "updater_gpu_common.cuh" #include "constraints.cuh" #include "gpu_hist/row_partitioner.cuh" namespace xgboost { namespace tree { #if !defined(GTEST_TEST) DMLC_REGISTRY_FILE_TAG(updater_gpu_hist); #endif // !defined(GTEST_TEST) // training parameters specific to this algorithm struct GPUHistMakerTrainParam : public dmlc::Parameter<GPUHistMakerTrainParam> { bool single_precision_histogram; // number of rows in a single GPU batch int gpu_batch_nrows; bool debug_synchronize; // declare parameters DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) { DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe( "Use single precision to build histograms."); DMLC_DECLARE_FIELD(gpu_batch_nrows) .set_lower_bound(-1) .set_default(0) .describe("Number of rows in a GPU batch, used for finding quantiles on GPU; " "-1 to use all rows assignted to a GPU, and 0 to auto-deduce"); DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe( "Check if all distributed tree are identical after tree construction."); } }; #if !defined(GTEST_TEST) DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam); #endif // !defined(GTEST_TEST) struct ExpandEntry { int nid; int depth; DeviceSplitCandidate split; uint64_t timestamp; ExpandEntry() = default; ExpandEntry(int nid, int depth, DeviceSplitCandidate split, uint64_t timestamp) : nid(nid), depth(depth), split(std::move(split)), timestamp(timestamp) {} bool IsValid(const TrainParam& param, int num_leaves) const { if (split.loss_chg <= kRtEps) return false; if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0) { return false; } if (param.max_depth > 0 && depth == param.max_depth) return false; if (param.max_leaves > 0 && num_leaves == param.max_leaves) return false; return true; } static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) { if (param.max_depth > 0 && depth >= param.max_depth) return false; if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false; return true; } friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) { os << "ExpandEntry: \n"; os << "nidx: " << e.nid << "\n"; os << "depth: " << e.depth << "\n"; os << "loss: " << e.split.loss_chg << "\n"; os << "left_sum: " << e.split.left_sum << "\n"; os << "right_sum: " << e.split.right_sum << "\n"; return os; } }; inline static bool DepthWise(ExpandEntry lhs, ExpandEntry rhs) { if (lhs.depth == rhs.depth) { return lhs.timestamp > rhs.timestamp; // favor small timestamp } else { return lhs.depth > rhs.depth; // favor small depth } } inline static bool LossGuide(ExpandEntry lhs, ExpandEntry rhs) { if (lhs.split.loss_chg == rhs.split.loss_chg) { return lhs.timestamp > rhs.timestamp; // favor small timestamp } else { return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg } } // 
Find a gidx value for a given feature otherwise return -1 if not found __forceinline__ __device__ int BinarySearchRow( bst_uint begin, bst_uint end, common::CompressedIterator<uint32_t> data, int const fidx_begin, int const fidx_end) { bst_uint previous_middle = UINT32_MAX; while (end != begin) { auto middle = begin + (end - begin) / 2; if (middle == previous_middle) { break; } previous_middle = middle; auto gidx = data[middle]; if (gidx >= fidx_begin && gidx < fidx_end) { return gidx; } else if (gidx < fidx_begin) { begin = middle; } else { end = middle; } } // Value is missing return -1; } /** \brief Struct for accessing and manipulating an ellpack matrix on the * device. Does not own underlying memory and may be trivially copied into * kernels.*/ struct ELLPackMatrix { common::Span<uint32_t> feature_segments; /*! \brief minimum value for each feature. */ common::Span<bst_float> min_fvalue; /*! \brief Cut. */ common::Span<bst_float> gidx_fvalue_map; /*! \brief row length for ELLPack. */ size_t row_stride{0}; common::CompressedIterator<uint32_t> gidx_iter; bool is_dense; int null_gidx_value; XGBOOST_DEVICE size_t BinCount() const { return gidx_fvalue_map.size(); } // Get a matrix element, uses binary search for look up // Return NaN if missing __device__ bst_float GetElement(size_t ridx, size_t fidx) const { auto row_begin = row_stride * ridx; auto row_end = row_begin + row_stride; auto gidx = -1; if (is_dense) { gidx = gidx_iter[row_begin + fidx]; } else { gidx = BinarySearchRow(row_begin, row_end, gidx_iter, feature_segments[fidx], feature_segments[fidx + 1]); } if (gidx == -1) { return nan(""); } return gidx_fvalue_map[gidx]; } void Init(common::Span<uint32_t> feature_segments, common::Span<bst_float> min_fvalue, common::Span<bst_float> gidx_fvalue_map, size_t row_stride, common::CompressedIterator<uint32_t> gidx_iter, bool is_dense, int null_gidx_value) { this->feature_segments = feature_segments; this->min_fvalue = min_fvalue; this->gidx_fvalue_map = gidx_fvalue_map; this->row_stride = row_stride; this->gidx_iter = gidx_iter; this->is_dense = is_dense; this->null_gidx_value = null_gidx_value; } }; // With constraints template <typename GradientPairT> XGBOOST_DEVICE float inline LossChangeMissing( const GradientPairT& scan, const GradientPairT& missing, const GradientPairT& parent_sum, const float& parent_gain, const GPUTrainingParam& param, int constraint, const ValueConstraint& value_constraint, bool& missing_left_out) { // NOLINT float missing_left_gain = value_constraint.CalcSplitGain( param, constraint, GradStats(scan + missing), GradStats(parent_sum - (scan + missing))); float missing_right_gain = value_constraint.CalcSplitGain( param, constraint, GradStats(scan), GradStats(parent_sum - scan)); if (missing_left_gain >= missing_right_gain) { missing_left_out = true; return missing_left_gain - parent_gain; } else { missing_left_out = false; return missing_right_gain - parent_gain; } } /*! * \brief * * \tparam ReduceT BlockReduce Type. * \tparam TempStorage Cub Shared memory * * \param begin * \param end * \param temp_storage Shared memory for intermediate result. 
*/ template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT, typename GradientSumT> __device__ GradientSumT ReduceFeature(common::Span<const GradientSumT> feature_histogram, TempStorageT* temp_storage) { __shared__ cub::Uninitialized<GradientSumT> uninitialized_sum; GradientSumT& shared_sum = uninitialized_sum.Alias(); GradientSumT local_sum = GradientSumT(); // For loop sums features into one block size auto begin = feature_histogram.data(); auto end = begin + feature_histogram.size(); for (auto itr = begin; itr < end; itr += BLOCK_THREADS) { bool thread_active = itr + threadIdx.x < end; // Scan histogram GradientSumT bin = thread_active ? *(itr + threadIdx.x) : GradientSumT(); local_sum += bin; } local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, hipcub::Sum()); // Reduction result is stored in thread 0. if (threadIdx.x == 0) { shared_sum = local_sum; } __syncthreads(); return shared_sum; } /*! \brief Find the thread with best gain. */ template <int BLOCK_THREADS, typename ReduceT, typename ScanT, typename MaxReduceT, typename TempStorageT, typename GradientSumT> __device__ void EvaluateFeature( int fidx, common::Span<const GradientSumT> node_histogram, const ELLPackMatrix& matrix, DeviceSplitCandidate* best_split, // shared memory storing best split const DeviceNodeStats& node, const GPUTrainingParam& param, TempStorageT* temp_storage, // temp memory for cub operations int constraint, // monotonic_constraints const ValueConstraint& value_constraint) { // Use pointer from cut to indicate begin and end of bins for each feature. uint32_t gidx_begin = matrix.feature_segments[fidx]; // begining bin uint32_t gidx_end = matrix.feature_segments[fidx + 1]; // end bin for i^th feature // Sum histogram bins for current feature GradientSumT const feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT>( node_histogram.subspan(gidx_begin, gidx_end - gidx_begin), temp_storage); GradientSumT const parent_sum = GradientSumT(node.sum_gradients); GradientSumT const missing = parent_sum - feature_sum; float const null_gain = -std::numeric_limits<bst_float>::infinity(); SumCallbackOp<GradientSumT> prefix_op = SumCallbackOp<GradientSumT>(); for (int scan_begin = gidx_begin; scan_begin < gidx_end; scan_begin += BLOCK_THREADS) { bool thread_active = (scan_begin + threadIdx.x) < gidx_end; // Gradient value for current bin. GradientSumT bin = thread_active ? node_histogram[scan_begin + threadIdx.x] : GradientSumT(); ScanT(temp_storage->scan).ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op); // Whether the gradient of missing values is put to the left side. bool missing_left = true; float gain = null_gain; if (thread_active) { gain = LossChangeMissing(bin, missing, parent_sum, node.root_gain, param, constraint, value_constraint, missing_left); } __syncthreads(); // Find thread with best gain hipcub::KeyValuePair<int, float> tuple(threadIdx.x, gain); hipcub::KeyValuePair<int, float> best = MaxReduceT(temp_storage->max_reduce).Reduce(tuple, hipcub::ArgMax()); __shared__ hipcub::KeyValuePair<int, float> block_max; if (threadIdx.x == 0) { block_max = best; } __syncthreads(); // Best thread updates split if (threadIdx.x == block_max.key) { int split_gidx = (scan_begin + threadIdx.x) - 1; float fvalue; if (split_gidx < static_cast<int>(gidx_begin)) { fvalue = matrix.min_fvalue[fidx]; } else { fvalue = matrix.gidx_fvalue_map[split_gidx]; } GradientSumT left = missing_left ? bin + missing : bin; GradientSumT right = parent_sum - left; best_split->Update(gain, missing_left ? 
kLeftDir : kRightDir, fvalue, fidx, GradientPair(left), GradientPair(right), param); } __syncthreads(); } } template <int BLOCK_THREADS, typename GradientSumT> __global__ void EvaluateSplitKernel( common::Span<const GradientSumT> node_histogram, // histogram for gradients common::Span<const int> feature_set, // Selected features DeviceNodeStats node, ELLPackMatrix matrix, GPUTrainingParam gpu_param, common::Span<DeviceSplitCandidate> split_candidates, // resulting split ValueConstraint value_constraint, common::Span<int> d_monotonic_constraints) { // KeyValuePair here used as threadIdx.x -> gain_value using ArgMaxT = hipcub::KeyValuePair<int, float>; using BlockScanT = hipcub::BlockScan<GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>; using MaxReduceT = hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS>; using SumReduceT = hipcub::BlockReduce<GradientSumT, BLOCK_THREADS>; union TempStorage { typename BlockScanT::TempStorage scan; typename MaxReduceT::TempStorage max_reduce; typename SumReduceT::TempStorage sum_reduce; }; // Aligned && shared storage for best_split __shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split; DeviceSplitCandidate& best_split = uninitialized_split.Alias(); __shared__ TempStorage temp_storage; if (threadIdx.x == 0) { best_split = DeviceSplitCandidate(); } __syncthreads(); // One block for each feature. Features are sampled, so fidx != blockIdx.x int fidx = feature_set[blockIdx.x]; int constraint = d_monotonic_constraints[fidx]; EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>( fidx, node_histogram, matrix, &best_split, node, gpu_param, &temp_storage, constraint, value_constraint); __syncthreads(); if (threadIdx.x == 0) { // Record best loss for each feature split_candidates[blockIdx.x] = best_split; } } /** * \struct DeviceHistogram * * \summary Data storage for node histograms on device. Automatically expands. * * \tparam GradientSumT histogram entry type. * \tparam kStopGrowingSize Do not grow beyond this size * * \author Rory * \date 28/07/2018 */ template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26> class DeviceHistogram { private: /*! \brief Map nidx to starting index of its histogram. */ std::map<int, size_t> nidx_map_; dh::device_vector<typename GradientSumT::ValueT> data_; int n_bins_; int device_id_; static constexpr size_t kNumItemsInGradientSum = sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT); static_assert(kNumItemsInGradientSum == 2, "Number of items in gradient type should be 2."); public: void Init(int device_id, int n_bins) { this->n_bins_ = n_bins; this->device_id_ = device_id; } void Reset() { dh::safe_cuda(hipMemsetAsync( data_.data().get(), 0, data_.size() * sizeof(typename decltype(data_)::value_type))); nidx_map_.clear(); } bool HistogramExists(int nidx) const { return nidx_map_.find(nidx) != nidx_map_.cend(); } size_t HistogramSize() const { return n_bins_ * kNumItemsInGradientSum; } dh::device_vector<typename GradientSumT::ValueT>& Data() { return data_; } void AllocateHistogram(int nidx) { if (HistogramExists(nidx)) return; // Number of items currently used in data const size_t used_size = nidx_map_.size() * HistogramSize(); const size_t new_used_size = used_size + HistogramSize(); dh::safe_cuda(hipSetDevice(device_id_)); if (data_.size() >= kStopGrowingSize) { // Recycle histogram memory if (new_used_size <= data_.size()) { // no need to remove old node, just insert the new one. 
nidx_map_[nidx] = used_size; // memset histogram size in bytes dh::safe_cuda(hipMemsetAsync(data_.data().get() + used_size, 0, n_bins_ * sizeof(GradientSumT))); } else { std::pair<int, size_t> old_entry = *nidx_map_.begin(); nidx_map_.erase(old_entry.first); dh::safe_cuda(hipMemsetAsync(data_.data().get() + old_entry.second, 0, n_bins_ * sizeof(GradientSumT))); nidx_map_[nidx] = old_entry.second; } } else { // Append new node histogram nidx_map_[nidx] = used_size; size_t new_required_memory = ::max(data_.size() * 2, HistogramSize()); if (data_.size() < new_required_memory) { data_.resize(new_required_memory); } } } /** * \summary Return pointer to histogram memory for a given node. * \param nidx Tree node index. * \return hist pointer. */ common::Span<GradientSumT> GetNodeHistogram(int nidx) { CHECK(this->HistogramExists(nidx)); auto ptr = data_.data().get() + nidx_map_[nidx]; return common::Span<GradientSumT>( reinterpret_cast<GradientSumT*>(ptr), n_bins_); } }; struct CalcWeightTrainParam { float min_child_weight; float reg_alpha; float reg_lambda; float max_delta_step; float learning_rate; XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p) : min_child_weight(p.min_child_weight), reg_alpha(p.reg_alpha), reg_lambda(p.reg_lambda), max_delta_step(p.max_delta_step), learning_rate(p.learning_rate) {} }; // Bin each input data entry, store the bin indices in compressed form. template<typename std::enable_if<true, int>::type = 0> __global__ void CompressBinEllpackKernel( common::CompressedBufferWriter wr, common::CompressedByteT* __restrict__ buffer, // gidx_buffer const size_t* __restrict__ row_ptrs, // row offset of input data const Entry* __restrict__ entries, // One batch of input data const float* __restrict__ cuts, // HistogramCuts::cut const uint32_t* __restrict__ cut_rows, // HistogramCuts::row_ptrs size_t base_row, // batch_row_begin size_t n_rows, size_t row_stride, unsigned int null_gidx_value) { size_t irow = threadIdx.x + blockIdx.x * blockDim.x; int ifeature = threadIdx.y + blockIdx.y * blockDim.y; if (irow >= n_rows || ifeature >= row_stride) { return; } int row_length = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]); unsigned int bin = null_gidx_value; if (ifeature < row_length) { Entry entry = entries[row_ptrs[irow] - row_ptrs[0] + ifeature]; int feature = entry.index; float fvalue = entry.fvalue; // {feature_cuts, ncuts} forms the array of cuts of `feature'. const float *feature_cuts = &cuts[cut_rows[feature]]; int ncuts = cut_rows[feature + 1] - cut_rows[feature]; // Assigning the bin in current entry. // S.t.: fvalue < feature_cuts[bin] bin = dh::UpperBound(feature_cuts, ncuts, fvalue); if (bin >= ncuts) { bin = ncuts - 1; } // Add the number of bins in previous features. bin += cut_rows[feature]; } // Write to gidx buffer. 
wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature); } template <typename GradientSumT> __global__ void SharedMemHistKernel(ELLPackMatrix matrix, common::Span<const RowPartitioner::RowIndexT> d_ridx, GradientSumT* d_node_hist, const GradientPair* d_gpair, size_t n_elements, bool use_shared_memory_histograms) { extern __shared__ char smem[]; GradientSumT* smem_arr = reinterpret_cast<GradientSumT*>(smem); // NOLINT if (use_shared_memory_histograms) { dh::BlockFill(smem_arr, matrix.BinCount(), GradientSumT()); __syncthreads(); } for (auto idx : dh::GridStrideRange(static_cast<size_t>(0), n_elements)) { int ridx = d_ridx[idx / matrix.row_stride ]; int gidx = matrix.gidx_iter[ridx * matrix.row_stride + idx % matrix.row_stride]; if (gidx != matrix.null_gidx_value) { // If we are not using shared memory, accumulate the values directly into // global memory GradientSumT* atomic_add_ptr = use_shared_memory_histograms ? smem_arr : d_node_hist; AtomicAddGpair(atomic_add_ptr + gidx, d_gpair[ridx]); } } if (use_shared_memory_histograms) { // Write shared memory back to global memory __syncthreads(); for (auto i : dh::BlockStrideRange(static_cast<size_t>(0), matrix.BinCount())) { AtomicAddGpair(d_node_hist + i, smem_arr[i]); } } } // Instances of this type are created while creating the histogram bins for the // entire dataset across multiple sparse page batches. This keeps track of the number // of rows to process from a batch and the position from which to process on each device. struct RowStateOnDevice { // Number of rows assigned to this device const size_t total_rows_assigned_to_device; // Number of rows processed thus far size_t total_rows_processed; // Number of rows to process from the current sparse page batch size_t rows_to_process_from_batch; // Offset from the current sparse page batch to begin processing size_t row_offset_in_current_batch; explicit RowStateOnDevice(size_t total_rows) : total_rows_assigned_to_device(total_rows), total_rows_processed(0), rows_to_process_from_batch(0), row_offset_in_current_batch(0) { } explicit RowStateOnDevice(size_t total_rows, size_t batch_rows) : total_rows_assigned_to_device(total_rows), total_rows_processed(0), rows_to_process_from_batch(batch_rows), row_offset_in_current_batch(0) { } // Advance the row state by the number of rows processed void Advance() { total_rows_processed += rows_to_process_from_batch; CHECK_LE(total_rows_processed, total_rows_assigned_to_device); rows_to_process_from_batch = row_offset_in_current_batch = 0; } }; // Manage memory for a single GPU template <typename GradientSumT> struct DeviceShard { int n_bins; int device_id; int shard_idx; // Position in the local array of shards dh::BulkAllocator ba; ELLPackMatrix ellpack_matrix; std::unique_ptr<RowPartitioner> row_partitioner; DeviceHistogram<GradientSumT> hist; /*! \brief row_ptr form HistogramCuts. */ common::Span<uint32_t> feature_segments; /*! \brief minimum value for each feature. */ common::Span<bst_float> min_fvalue; /*! \brief Cut. */ common::Span<bst_float> gidx_fvalue_map; /*! \brief global index of histogram, which is stored in ELLPack format. */ common::Span<common::CompressedByteT> gidx_buffer; /*! \brief Gradient pair for each row. */ common::Span<GradientPair> gpair; common::Span<int> monotone_constraints; common::Span<bst_float> prediction_cache; /*! \brief Sum gradient for each node. */ std::vector<GradientPair> node_sum_gradients; common::Span<GradientPair> node_sum_gradients_d; /*! The row offset for this shard. 
*/ bst_uint row_begin_idx; bst_uint row_end_idx; bst_uint n_rows; TrainParam param; bool prediction_cache_initialised; bool use_shared_memory_histograms {false}; dh::CubMemory temp_memory; dh::PinnedMemory pinned_memory; std::vector<hipStream_t> streams; common::Monitor monitor; std::vector<ValueConstraint> node_value_constraints; common::ColumnSampler column_sampler; FeatureInteractionConstraint interaction_constraints; using ExpandQueue = std::priority_queue<ExpandEntry, std::vector<ExpandEntry>, std::function<bool(ExpandEntry, ExpandEntry)>>; std::unique_ptr<ExpandQueue> qexpand; DeviceShard(int _device_id, int shard_idx, bst_uint row_begin, bst_uint row_end, TrainParam _param, uint32_t column_sampler_seed, uint32_t n_features) : device_id(_device_id), shard_idx(shard_idx), row_begin_idx(row_begin), row_end_idx(row_end), n_rows(row_end - row_begin), n_bins(0), param(std::move(_param)), prediction_cache_initialised(false), column_sampler(column_sampler_seed), interaction_constraints(param, n_features) { monitor.Init(std::string("DeviceShard") + std::to_string(device_id)); } void InitCompressedData( const common::HistogramCuts& hmat, size_t row_stride, bool is_dense); void CreateHistIndices( const SparsePage &row_batch, const common::HistogramCuts &hmat, const RowStateOnDevice &device_row_state, int rows_per_batch); ~DeviceShard() { dh::safe_cuda(hipSetDevice(device_id)); for (auto& stream : streams) { dh::safe_cuda(hipStreamDestroy(stream)); } } // Get vector of at least n initialised streams std::vector<hipStream_t>& GetStreams(int n) { if (n > streams.size()) { for (auto& stream : streams) { dh::safe_cuda(hipStreamDestroy(stream)); } streams.clear(); streams.resize(n); for (auto& stream : streams) { dh::safe_cuda(hipStreamCreate(&stream)); } } return streams; } // Reset values for each update iteration // Note that the column sampler must be passed by value because it is not // thread safe void Reset(HostDeviceVector<GradientPair>* dh_gpair, int64_t num_columns) { if (param.grow_policy == TrainParam::kLossGuide) { qexpand.reset(new ExpandQueue(LossGuide)); } else { qexpand.reset(new ExpandQueue(DepthWise)); } this->column_sampler.Init(num_columns, param.colsample_bynode, param.colsample_bylevel, param.colsample_bytree); dh::safe_cuda(hipSetDevice(device_id)); this->interaction_constraints.Reset(); std::fill(node_sum_gradients.begin(), node_sum_gradients.end(), GradientPair()); row_partitioner.reset(); // Release the device memory first before reallocating row_partitioner.reset(new RowPartitioner(device_id, n_rows)); dh::safe_cuda(hipMemcpyAsync( gpair.data(), dh_gpair->ConstDevicePointer(device_id), gpair.size() * sizeof(GradientPair), hipMemcpyHostToHost)); SubsampleGradientPair(device_id, gpair, param.subsample, row_begin_idx); hist.Reset(); } std::vector<DeviceSplitCandidate> EvaluateSplits( std::vector<int> nidxs, const RegTree& tree, size_t num_columns) { dh::safe_cuda(hipSetDevice(device_id)); auto result_all = pinned_memory.GetSpan<DeviceSplitCandidate>(nidxs.size()); // Work out cub temporary memory requirement GPUTrainingParam gpu_param(param); DeviceSplitCandidateReduceOp op(gpu_param); size_t temp_storage_bytes = 0; DeviceSplitCandidate*dummy = nullptr; hipcub::DeviceReduce::Reduce( nullptr, temp_storage_bytes, dummy, dummy, num_columns, op, DeviceSplitCandidate()); // size in terms of DeviceSplitCandidate size_t cub_memory_size = ::ceil(static_cast<double>(temp_storage_bytes) / sizeof(DeviceSplitCandidate)); // Allocate enough temporary memory // Result for each nidx // + 
intermediate result for each column // + cub reduce memory auto temp_span = temp_memory.GetSpan<DeviceSplitCandidate>( nidxs.size() + nidxs.size() * num_columns +cub_memory_size*nidxs.size()); auto d_result_all = temp_span.subspan(0, nidxs.size()); auto d_split_candidates_all = temp_span.subspan(d_result_all.size(), nidxs.size() * num_columns); auto d_cub_memory_all = temp_span.subspan(d_result_all.size() + d_split_candidates_all.size(), cub_memory_size * nidxs.size()); auto& streams = this->GetStreams(nidxs.size()); for (auto i = 0ull; i < nidxs.size(); i++) { auto nidx = nidxs[i]; auto p_feature_set = column_sampler.GetFeatureSet(tree.GetDepth(nidx)); p_feature_set->Shard(GPUSet(device_id, 1)); auto d_sampled_features = p_feature_set->DeviceSpan(device_id); common::Span<int32_t> d_feature_set = interaction_constraints.Query(d_sampled_features, nidx); auto d_split_candidates = d_split_candidates_all.subspan(i * num_columns, d_feature_set.size()); DeviceNodeStats node(node_sum_gradients[nidx], nidx, param); auto d_result = d_result_all.subspan(i, 1); if (d_feature_set.size() == 0) { // Acting as a device side constructor for DeviceSplitCandidate. // DeviceSplitCandidate::IsValid is false so that ApplySplit can reject this // candidate. auto worst_candidate = DeviceSplitCandidate(); dh::safe_cuda(hipMemcpyAsync(d_result.data(), &worst_candidate, sizeof(DeviceSplitCandidate), hipMemcpyHostToDevice)); continue; } // One block for each feature int constexpr kBlockThreads = 256; hipLaunchKernelGGL(( EvaluateSplitKernel<kBlockThreads, GradientSumT>) , dim3(uint32_t(d_feature_set.size())), dim3(kBlockThreads), 0, streams[i], hist.GetNodeHistogram(nidx), d_feature_set, node, ellpack_matrix, gpu_param, d_split_candidates, node_value_constraints[nidx], monotone_constraints); // Reduce over features to find best feature auto d_cub_memory = d_cub_memory_all.subspan(i * cub_memory_size, cub_memory_size); size_t cub_bytes = d_cub_memory.size() * sizeof(DeviceSplitCandidate); hipcub::DeviceReduce::Reduce(reinterpret_cast<void*>(d_cub_memory.data()), cub_bytes, d_split_candidates.data(), d_result.data(), d_split_candidates.size(), op, DeviceSplitCandidate(), streams[i]); } dh::safe_cuda(hipMemcpy(result_all.data(), d_result_all.data(), sizeof(DeviceSplitCandidate) * d_result_all.size(), hipMemcpyDeviceToHost)); return std::vector<DeviceSplitCandidate>(result_all.begin(), result_all.end()); } void BuildHist(int nidx) { hist.AllocateHistogram(nidx); auto d_node_hist = hist.GetNodeHistogram(nidx); auto d_ridx = row_partitioner->GetRows(nidx); auto d_gpair = gpair.data(); auto n_elements = d_ridx.size() * ellpack_matrix.row_stride; const size_t smem_size = use_shared_memory_histograms ? 
sizeof(GradientSumT) * ellpack_matrix.BinCount() : 0; const int items_per_thread = 8; const int block_threads = 256; const int grid_size = static_cast<int>( common::DivRoundUp(n_elements, items_per_thread * block_threads)); if (grid_size <= 0) { return; } hipLaunchKernelGGL(( SharedMemHistKernel), dim3(grid_size), dim3(block_threads), smem_size, 0, ellpack_matrix, d_ridx, d_node_hist.data(), d_gpair, n_elements, use_shared_memory_histograms); } void SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) { auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent); auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram); auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction); dh::LaunchN(device_id, n_bins, [=] __device__(size_t idx) { d_node_hist_subtraction[idx] = d_node_hist_parent[idx] - d_node_hist_histogram[idx]; }); } bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) { // Make sure histograms are already allocated hist.AllocateHistogram(nidx_subtraction); return hist.HistogramExists(nidx_histogram) && hist.HistogramExists(nidx_parent); } void UpdatePosition(int nidx, RegTree::Node split_node) { auto d_matrix = ellpack_matrix; row_partitioner->UpdatePosition( nidx, split_node.LeftChild(), split_node.RightChild(), [=] __device__(bst_uint ridx) { bst_float element = d_matrix.GetElement(ridx, split_node.SplitIndex()); // Missing value int new_position = 0; if (isnan(element)) { new_position = split_node.DefaultChild(); } else { if (element <= split_node.SplitCond()) { new_position = split_node.LeftChild(); } else { new_position = split_node.RightChild(); } } return new_position; }); } // After tree update is finished, update the position of all training // instances to their final leaf This information is used later to update the // prediction cache void FinalisePosition(RegTree* p_tree) { const auto d_nodes = temp_memory.GetSpan<RegTree::Node>(p_tree->GetNodes().size()); dh::safe_cuda(hipMemcpy(d_nodes.data(), p_tree->GetNodes().data(), d_nodes.size() * sizeof(RegTree::Node), hipMemcpyHostToDevice)); auto d_matrix = ellpack_matrix; row_partitioner->FinalisePosition( [=] __device__(bst_uint ridx, int position) { auto node = d_nodes[position]; while (!node.IsLeaf()) { bst_float element = d_matrix.GetElement(ridx, node.SplitIndex()); // Missing value if (isnan(element)) { position = node.DefaultChild(); } else { if (element <= node.SplitCond()) { position = node.LeftChild(); } else { position = node.RightChild(); } } node = d_nodes[position]; } return position; }); } void UpdatePredictionCache(bst_float* out_preds_d) { dh::safe_cuda(hipSetDevice(device_id)); if (!prediction_cache_initialised) { dh::safe_cuda(hipMemcpyAsync(prediction_cache.data(), out_preds_d, prediction_cache.size() * sizeof(bst_float), hipMemcpyDefault)); } prediction_cache_initialised = true; CalcWeightTrainParam param_d(param); dh::safe_cuda( hipMemcpyAsync(node_sum_gradients_d.data(), node_sum_gradients.data(), sizeof(GradientPair) * node_sum_gradients.size(), hipMemcpyHostToDevice)); auto d_position = row_partitioner->GetPosition(); auto d_ridx = row_partitioner->GetRows(); auto d_node_sum_gradients = node_sum_gradients_d.data(); auto d_prediction_cache = prediction_cache.data(); dh::LaunchN( device_id, prediction_cache.size(), [=] __device__(int local_idx) { int pos = d_position[local_idx]; bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]); d_prediction_cache[d_ridx[local_idx]] += weight * param_d.learning_rate; }); 
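// The kernel above folds this tree's contribution into the prediction cache:
// each row's final leaf position indexes that leaf's gradient sum, the leaf
// weight is recomputed from it, and weight * learning_rate is added to the
// row's cached prediction. The copy below publishes the updated cache back to
// the caller's prediction buffer before the row partitioner is released.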
dh::safe_cuda(hipMemcpy( out_preds_d, prediction_cache.data(), prediction_cache.size() * sizeof(bst_float), hipMemcpyDefault)); row_partitioner.reset(); } void AllReduceHist(int nidx, dh::AllReducer* reducer) { monitor.StartCuda("AllReduce"); auto d_node_hist = hist.GetNodeHistogram(nidx).data(); reducer->AllReduceSum( shard_idx, reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist), reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist), ellpack_matrix.BinCount() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT))); reducer->Synchronize(device_id); monitor.StopCuda("AllReduce"); } /** * \brief Build GPU local histograms for the left and right child of some parent node */ void BuildHistLeftRight(int nidx_parent, int nidx_left, int nidx_right, dh::AllReducer* reducer) { auto build_hist_nidx = nidx_left; auto subtraction_trick_nidx = nidx_right; auto left_node_rows = row_partitioner->GetRows(nidx_left).size(); auto right_node_rows = row_partitioner->GetRows(nidx_right).size(); // Decide whether to build the left histogram or right histogram // Find the largest number of training instances on any given Shard // Assume this will be the bottleneck and avoid building this node if // possible std::vector<size_t> max_reduce; max_reduce.push_back(left_node_rows); max_reduce.push_back(right_node_rows); reducer->HostMaxAllReduce(&max_reduce); bool fewer_right = max_reduce[1] < max_reduce[0]; if (fewer_right) { std::swap(build_hist_nidx, subtraction_trick_nidx); } this->BuildHist(build_hist_nidx); this->AllReduceHist(build_hist_nidx, reducer); // Check whether we can use the subtraction trick to calculate the other bool do_subtraction_trick = this->CanDoSubtractionTrick( nidx_parent, build_hist_nidx, subtraction_trick_nidx); if (do_subtraction_trick) { // Calculate other histogram using subtraction trick this->SubtractionTrick(nidx_parent, build_hist_nidx, subtraction_trick_nidx); } else { // Calculate other histogram manually this->BuildHist(subtraction_trick_nidx); this->AllReduceHist(subtraction_trick_nidx, reducer); } } void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) { RegTree& tree = *p_tree; GradStats left_stats; left_stats.Add(candidate.split.left_sum); GradStats right_stats; right_stats.Add(candidate.split.right_sum); GradStats parent_sum; parent_sum.Add(left_stats); parent_sum.Add(right_stats); node_value_constraints.resize(tree.GetNodes().size()); auto base_weight = node_value_constraints[candidate.nid].CalcWeight(param, parent_sum); auto left_weight = node_value_constraints[candidate.nid].CalcWeight(param, left_stats)*param.learning_rate; auto right_weight = node_value_constraints[candidate.nid].CalcWeight(param, right_stats)*param.learning_rate; tree.ExpandNode(candidate.nid, candidate.split.findex, candidate.split.fvalue, candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_sum.sum_hess); // Set up child constraints node_value_constraints.resize(tree.GetNodes().size()); node_value_constraints[candidate.nid].SetChild( param, tree[candidate.nid].SplitIndex(), left_stats, right_stats, &node_value_constraints[tree[candidate.nid].LeftChild()], &node_value_constraints[tree[candidate.nid].RightChild()]); node_sum_gradients[tree[candidate.nid].LeftChild()] = candidate.split.left_sum; node_sum_gradients[tree[candidate.nid].RightChild()] = candidate.split.right_sum; interaction_constraints.Split(candidate.nid, tree[candidate.nid].SplitIndex(), tree[candidate.nid].LeftChild(), 
tree[candidate.nid].RightChild()); } void InitRoot(RegTree* p_tree, HostDeviceVector<GradientPair>* gpair_all, dh::AllReducer* reducer, int64_t num_columns) { constexpr int kRootNIdx = 0; const auto &gpair = gpair_all->DeviceSpan(device_id); dh::SumReduction(temp_memory, gpair, node_sum_gradients_d, gpair.size()); reducer->AllReduceSum( shard_idx, reinterpret_cast<float*>(node_sum_gradients_d.data()), reinterpret_cast<float*>(node_sum_gradients_d.data()), 2); reducer->Synchronize(device_id); dh::safe_cuda(hipMemcpy(node_sum_gradients.data(), node_sum_gradients_d.data(), sizeof(GradientPair), hipMemcpyDeviceToHost)); this->BuildHist(kRootNIdx); this->AllReduceHist(kRootNIdx, reducer); // Remember root stats p_tree->Stat(kRootNIdx).sum_hess = node_sum_gradients[kRootNIdx].GetHess(); auto weight = CalcWeight(param, node_sum_gradients[kRootNIdx]); p_tree->Stat(kRootNIdx).base_weight = weight; (*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight); // Initialise root constraint node_value_constraints.resize(p_tree->GetNodes().size()); // Generate first split auto split = this->EvaluateSplits({kRootNIdx}, *p_tree, num_columns); qexpand->push( ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split.at(0), 0)); } void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat, RegTree* p_tree, dh::AllReducer* reducer) { auto& tree = *p_tree; monitor.StartCuda("Reset"); this->Reset(gpair_all, p_fmat->Info().num_col_); monitor.StopCuda("Reset"); monitor.StartCuda("InitRoot"); this->InitRoot(p_tree, gpair_all, reducer, p_fmat->Info().num_col_); monitor.StopCuda("InitRoot"); auto timestamp = qexpand->size(); auto num_leaves = 1; while (!qexpand->empty()) { ExpandEntry candidate = qexpand->top(); qexpand->pop(); if (!candidate.IsValid(param, num_leaves)) { continue; } this->ApplySplit(candidate, p_tree); num_leaves++; int left_child_nidx = tree[candidate.nid].LeftChild(); int right_child_nidx = tree[candidate.nid].RightChild(); // Only create child entries if needed if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx), num_leaves)) { monitor.StartCuda("UpdatePosition"); this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]); monitor.StopCuda("UpdatePosition"); monitor.StartCuda("BuildHist"); this->BuildHistLeftRight(candidate.nid, left_child_nidx, right_child_nidx, reducer); monitor.StopCuda("BuildHist"); monitor.StartCuda("EvaluateSplits"); auto splits = this->EvaluateSplits({left_child_nidx, right_child_nidx}, *p_tree, p_fmat->Info().num_col_); monitor.StopCuda("EvaluateSplits"); qexpand->push(ExpandEntry(left_child_nidx, tree.GetDepth(left_child_nidx), splits.at(0), timestamp++)); qexpand->push(ExpandEntry(right_child_nidx, tree.GetDepth(right_child_nidx), splits.at(1), timestamp++)); } } monitor.StartCuda("FinalisePosition"); this->FinalisePosition(p_tree); monitor.StopCuda("FinalisePosition"); } }; template <typename GradientSumT> inline void DeviceShard<GradientSumT>::InitCompressedData( const common::HistogramCuts &hmat, size_t row_stride, bool is_dense) { n_bins = hmat.Ptrs().back(); int null_gidx_value = hmat.Ptrs().back(); CHECK(!(param.max_leaves == 0 && param.max_depth == 0)) << "Max leaves and max depth cannot both be unconstrained for " "gpu_hist."; int max_nodes = param.max_leaves > 0 ? 
param.max_leaves * 2 : MaxNodesDepth(param.max_depth); ba.Allocate(device_id, &gpair, n_rows, &prediction_cache, n_rows, &node_sum_gradients_d, max_nodes, &feature_segments, hmat.Ptrs().size(), &gidx_fvalue_map, hmat.Values().size(), &min_fvalue, hmat.MinValues().size(), &monotone_constraints, param.monotone_constraints.size()); dh::CopyVectorToDeviceSpan(gidx_fvalue_map, hmat.Values()); dh::CopyVectorToDeviceSpan(min_fvalue, hmat.MinValues()); dh::CopyVectorToDeviceSpan(feature_segments, hmat.Ptrs()); dh::CopyVectorToDeviceSpan(monotone_constraints, param.monotone_constraints); node_sum_gradients.resize(max_nodes); // allocate compressed bin data int num_symbols = n_bins + 1; // Required buffer size for storing data matrix in ELLPack format. size_t compressed_size_bytes = common::CompressedBufferWriter::CalculateBufferSize(row_stride * n_rows, num_symbols); ba.Allocate(device_id, &gidx_buffer, compressed_size_bytes); thrust::fill( thrust::device_pointer_cast(gidx_buffer.data()), thrust::device_pointer_cast(gidx_buffer.data() + gidx_buffer.size()), 0); ellpack_matrix.Init( feature_segments, min_fvalue, gidx_fvalue_map, row_stride, common::CompressedIterator<uint32_t>(gidx_buffer.data(), num_symbols), is_dense, null_gidx_value); // check if we can use shared memory for building histograms // (assuming atleast we need 2 CTAs per SM to maintain decent latency // hiding) auto histogram_size = sizeof(GradientSumT) * hmat.Ptrs().back(); auto max_smem = dh::MaxSharedMemory(device_id); if (histogram_size <= max_smem) { use_shared_memory_histograms = true; } // Init histogram hist.Init(device_id, hmat.Ptrs().back()); } template <typename GradientSumT> inline void DeviceShard<GradientSumT>::CreateHistIndices( const SparsePage &row_batch, const common::HistogramCuts &hmat, const RowStateOnDevice &device_row_state, int rows_per_batch) { // Has any been allocated for me in this batch? if (!device_row_state.rows_to_process_from_batch) return; unsigned int null_gidx_value = hmat.Ptrs().back(); size_t row_stride = this->ellpack_matrix.row_stride; const auto &offset_vec = row_batch.offset.ConstHostVector(); int num_symbols = n_bins + 1; // bin and compress entries in batches of rows size_t gpu_batch_nrows = ::min( dh::TotalMemory(device_id) / (16 * row_stride * sizeof(Entry)), static_cast<size_t>(device_row_state.rows_to_process_from_batch)); const std::vector<Entry>& data_vec = row_batch.data.ConstHostVector(); size_t gpu_nbatches = common::DivRoundUp(device_row_state.rows_to_process_from_batch, gpu_batch_nrows); for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) { size_t batch_row_begin = gpu_batch * gpu_batch_nrows; size_t batch_row_end = (gpu_batch + 1) * gpu_batch_nrows; if (batch_row_end > device_row_state.rows_to_process_from_batch) { batch_row_end = device_row_state.rows_to_process_from_batch; } size_t batch_nrows = batch_row_end - batch_row_begin; const auto ent_cnt_begin = offset_vec[device_row_state.row_offset_in_current_batch + batch_row_begin]; const auto ent_cnt_end = offset_vec[device_row_state.row_offset_in_current_batch + batch_row_end]; /*! \brief row offset in SparsePage (the input data). */ dh::device_vector<size_t> row_ptrs(batch_nrows+1); thrust::copy( offset_vec.data() + device_row_state.row_offset_in_current_batch + batch_row_begin, offset_vec.data() + device_row_state.row_offset_in_current_batch + batch_row_end + 1, row_ptrs.begin()); // number of entries in this batch. 
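    // Copy the entries for this batch of rows to the device, then bin and
    // compress them into the ELLPACK gidx buffer with CompressBinEllpackKernel.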
    size_t n_entries = ent_cnt_end - ent_cnt_begin;
    dh::device_vector<Entry> entries_d(n_entries);
    // copy data entries to device.
    dh::safe_cuda
        (hipMemcpy
         (entries_d.data().get(), data_vec.data() + ent_cnt_begin,
          n_entries * sizeof(Entry), hipMemcpyDefault));
    const dim3 block3(32, 8, 1);  // 256 threads
    const dim3 grid3(common::DivRoundUp(batch_nrows, block3.x),
                     common::DivRoundUp(row_stride, block3.y), 1);
    hipLaunchKernelGGL(( CompressBinEllpackKernel), dim3(grid3), dim3(block3), 0, 0,
        common::CompressedBufferWriter(num_symbols),
        gidx_buffer.data(),
        row_ptrs.data().get(),
        entries_d.data().get(),
        gidx_fvalue_map.data(),
        feature_segments.data(),
        device_row_state.total_rows_processed + batch_row_begin,
        batch_nrows,
        row_stride,
        null_gidx_value);
  }
}

// An instance of this type keeps track, for each device, of the total number of rows
// to process, the rows processed thus far, the number of rows to process from the
// current sparse page batch, and the offset within that batch at which to begin processing
class DeviceHistogramBuilderState {
 public:
  template <typename GradientSumT>
  explicit DeviceHistogramBuilderState(
      const std::vector<std::unique_ptr<DeviceShard<GradientSumT>>> &shards) {
    device_row_states_.reserve(shards.size());
    for (const auto &shard : shards) {
      device_row_states_.push_back(RowStateOnDevice(shard->n_rows));
    }
  }

  const RowStateOnDevice &GetRowStateOnDevice(int idx) const {
    return device_row_states_[idx];
  }

  // This method is invoked at the beginning of each sparse page batch. This distributes
  // the rows in the sparse page to the different devices.
  // TODO(sriramch): Think of a way to utilize *all* the GPUs to build the compressed bins.
  void BeginBatch(const SparsePage &batch) {
    size_t rem_rows = batch.Size();
    size_t row_offset_in_current_batch = 0;
    for (auto &device_row_state : device_row_states_) {
      // Do we have any more rows left to process from this batch on this device?
if (device_row_state.total_rows_assigned_to_device > device_row_state.total_rows_processed) { // There are still some rows that needs to be assigned to this device device_row_state.rows_to_process_from_batch = ::min( device_row_state.total_rows_assigned_to_device - device_row_state.total_rows_processed, rem_rows); } else { // All rows have been assigned to this device device_row_state.rows_to_process_from_batch = 0; } device_row_state.row_offset_in_current_batch = row_offset_in_current_batch; row_offset_in_current_batch += device_row_state.rows_to_process_from_batch; rem_rows -= device_row_state.rows_to_process_from_batch; } } // This method is invoked after completion of each sparse page batch void EndBatch() { for (auto &rs : device_row_states_) { rs.Advance(); } } private: std::vector<RowStateOnDevice> device_row_states_; }; template <typename GradientSumT> class GPUHistMakerSpecialised { public: GPUHistMakerSpecialised() : initialised_{false}, p_last_fmat_{nullptr} {} void Init(const std::vector<std::pair<std::string, std::string>>& args, LearnerTrainParam const* lparam) { param_.InitAllowUnknown(args); learner_param_ = lparam; hist_maker_param_.InitAllowUnknown(args); auto devices = GPUSet::All(learner_param_->gpu_id, learner_param_->n_gpus); n_devices_ = devices.Size(); CHECK(n_devices_ != 0) << "Must have at least one device"; dist_ = GPUDistribution::Block(devices); dh::CheckComputeCapability(); monitor_.Init("updater_gpu_hist"); } ~GPUHistMakerSpecialised() { dh::GlobalMemoryLogger().Log(); } void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) { monitor_.StartCuda("Update"); // rescale learning rate according to size of trees float lr = param_.learning_rate; param_.learning_rate = lr / trees.size(); ValueConstraint::Init(&param_, dmat->Info().num_col_); // build tree try { for (xgboost::RegTree* tree : trees) { this->UpdateTree(gpair, dmat, tree); } dh::safe_cuda(hipGetLastError()); } catch (const std::exception& e) { LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl; } param_.learning_rate = lr; monitor_.StopCuda("Update"); } void InitDataOnce(DMatrix* dmat) { info_ = &dmat->Info(); int n_devices = dist_.Devices().Size(); device_list_.resize(n_devices); for (int index = 0; index < n_devices; ++index) { int device_id = dist_.Devices().DeviceId(index); device_list_[index] = device_id; } reducer_.Init(device_list_); // Synchronise the column sampling seed uint32_t column_sampling_seed = common::GlobalRandom()(); rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0); // Create device shards shards_.resize(n_devices); dh::ExecuteIndexShards( &shards_, [&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) { dh::safe_cuda(hipSetDevice(dist_.Devices().DeviceId(idx))); size_t start = dist_.ShardStart(info_->num_row_, idx); size_t size = dist_.ShardSize(info_->num_row_, idx); shard = std::unique_ptr<DeviceShard<GradientSumT>>( new DeviceShard<GradientSumT>(dist_.Devices().DeviceId(idx), idx, start, start + size, param_, column_sampling_seed, info_->num_col_)); }); monitor_.StartCuda("Quantiles"); // Create the quantile sketches for the dmatrix and initialize HistogramCuts size_t row_stride = common::DeviceSketch(param_, *learner_param_, hist_maker_param_.gpu_batch_nrows, dmat, &hmat_); monitor_.StopCuda("Quantiles"); n_bins_ = hmat_.Ptrs().back(); auto is_dense = info_->num_nonzero_ == info_->num_row_ * info_->num_col_; // Init global data for each shard monitor_.StartCuda("InitCompressedData"); 
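    // Each shard allocates its device-side structures (cut values, feature
    // segments and the compressed ELLPACK buffer) on its own GPU before the
    // binning pass below streams in the sparse page batches.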
dh::ExecuteIndexShards( &shards_, [&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) { dh::safe_cuda(hipSetDevice(shard->device_id)); shard->InitCompressedData(hmat_, row_stride, is_dense); }); monitor_.StopCuda("InitCompressedData"); monitor_.StartCuda("BinningCompression"); DeviceHistogramBuilderState hist_builder_row_state(shards_); for (const auto &batch : dmat->GetRowBatches()) { hist_builder_row_state.BeginBatch(batch); dh::ExecuteIndexShards( &shards_, [&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) { dh::safe_cuda(hipSetDevice(shard->device_id)); shard->CreateHistIndices(batch, hmat_, hist_builder_row_state.GetRowStateOnDevice(idx), hist_maker_param_.gpu_batch_nrows); }); hist_builder_row_state.EndBatch(); } monitor_.StopCuda("BinningCompression"); p_last_fmat_ = dmat; initialised_ = true; } void InitData(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat) { if (!initialised_) { monitor_.StartCuda("InitDataOnce"); this->InitDataOnce(dmat); monitor_.StopCuda("InitDataOnce"); } } // Only call this method for testing void CheckTreesSynchronized(const std::vector<RegTree>& local_trees) const { std::string s_model; common::MemoryBufferStream fs(&s_model); int rank = rabit::GetRank(); if (rank == 0) { local_trees.front().Save(&fs); } fs.Seek(0); rabit::Broadcast(&s_model, 0); RegTree reference_tree; reference_tree.Load(&fs); for (const auto& tree : local_trees) { CHECK(tree == reference_tree); } } void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, RegTree* p_tree) { monitor_.StartCuda("InitData"); this->InitData(gpair, p_fmat); monitor_.StopCuda("InitData"); std::vector<RegTree> trees(shards_.size()); for (auto& tree : trees) { tree = *p_tree; } gpair->Reshard(dist_); // Launch one thread for each device "shard" containing a subset of rows. // Threads will cooperatively build the tree, synchronising over histograms. // Each thread will redundantly build its own copy of the tree dh::ExecuteIndexShards( &shards_, [&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) { shard->UpdateTree(gpair, p_fmat, &trees.at(idx), &reducer_); }); // All trees are expected to be identical if (hist_maker_param_.debug_synchronize) { this->CheckTreesSynchronized(trees); } // Write the output tree *p_tree = trees.front(); } bool UpdatePredictionCache( const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) { if (shards_.empty() || p_last_fmat_ == nullptr || p_last_fmat_ != data) { return false; } monitor_.StartCuda("UpdatePredictionCache"); p_out_preds->Shard(dist_.Devices()); dh::ExecuteIndexShards( &shards_, [&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) { dh::safe_cuda(hipSetDevice(shard->device_id)); shard->UpdatePredictionCache( p_out_preds->DevicePointer(shard->device_id)); }); monitor_.StopCuda("UpdatePredictionCache"); return true; } TrainParam param_; // NOLINT common::HistogramCuts hmat_; // NOLINT MetaInfo* info_; // NOLINT std::vector<std::unique_ptr<DeviceShard<GradientSumT>>> shards_; // NOLINT private: bool initialised_; int n_devices_; int n_bins_; GPUHistMakerTrainParam hist_maker_param_; LearnerTrainParam const* learner_param_; dh::AllReducer reducer_; DMatrix* p_last_fmat_; GPUDistribution dist_; common::Monitor monitor_; /*! List storing device id. 
*/ std::vector<int> device_list_; }; class GPUHistMaker : public TreeUpdater { public: void Init( const std::vector<std::pair<std::string, std::string>>& args) override { hist_maker_param_.InitAllowUnknown(args); float_maker_.reset(); double_maker_.reset(); if (hist_maker_param_.single_precision_histogram) { float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>()); float_maker_->Init(args, tparam_); } else { double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>()); double_maker_->Init(args, tparam_); } } void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) override { if (hist_maker_param_.single_precision_histogram) { float_maker_->Update(gpair, dmat, trees); } else { double_maker_->Update(gpair, dmat, trees); } } bool UpdatePredictionCache( const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override { if (hist_maker_param_.single_precision_histogram) { return float_maker_->UpdatePredictionCache(data, p_out_preds); } else { return double_maker_->UpdatePredictionCache(data, p_out_preds); } } private: GPUHistMakerTrainParam hist_maker_param_; std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_; std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_; }; #if !defined(GTEST_TEST) XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist") .describe("Grow tree with GPU.") .set_body([]() { return new GPUHistMaker(); }); #endif // !defined(GTEST_TEST) } // namespace tree } // namespace xgboost
2711cf2ebb42d0533ecbbd223fbc0ee97d4cee52.cu
/*! * Copyright 2017-2019 XGBoost contributors */ #include <thrust/copy.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/reduce.h> #include <xgboost/tree_updater.h> #include <algorithm> #include <cmath> #include <memory> #include <limits> #include <queue> #include <utility> #include <vector> #include "../common/common.h" #include "../common/compressed_iterator.h" #include "../common/device_helpers.cuh" #include "../common/hist_util.h" #include "../common/host_device_vector.h" #include "../common/timer.h" #include "../common/span.h" #include "param.h" #include "updater_gpu_common.cuh" #include "constraints.cuh" #include "gpu_hist/row_partitioner.cuh" namespace xgboost { namespace tree { #if !defined(GTEST_TEST) DMLC_REGISTRY_FILE_TAG(updater_gpu_hist); #endif // !defined(GTEST_TEST) // training parameters specific to this algorithm struct GPUHistMakerTrainParam : public dmlc::Parameter<GPUHistMakerTrainParam> { bool single_precision_histogram; // number of rows in a single GPU batch int gpu_batch_nrows; bool debug_synchronize; // declare parameters DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) { DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe( "Use single precision to build histograms."); DMLC_DECLARE_FIELD(gpu_batch_nrows) .set_lower_bound(-1) .set_default(0) .describe("Number of rows in a GPU batch, used for finding quantiles on GPU; " "-1 to use all rows assignted to a GPU, and 0 to auto-deduce"); DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe( "Check if all distributed tree are identical after tree construction."); } }; #if !defined(GTEST_TEST) DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam); #endif // !defined(GTEST_TEST) struct ExpandEntry { int nid; int depth; DeviceSplitCandidate split; uint64_t timestamp; ExpandEntry() = default; ExpandEntry(int nid, int depth, DeviceSplitCandidate split, uint64_t timestamp) : nid(nid), depth(depth), split(std::move(split)), timestamp(timestamp) {} bool IsValid(const TrainParam& param, int num_leaves) const { if (split.loss_chg <= kRtEps) return false; if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0) { return false; } if (param.max_depth > 0 && depth == param.max_depth) return false; if (param.max_leaves > 0 && num_leaves == param.max_leaves) return false; return true; } static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) { if (param.max_depth > 0 && depth >= param.max_depth) return false; if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false; return true; } friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) { os << "ExpandEntry: \n"; os << "nidx: " << e.nid << "\n"; os << "depth: " << e.depth << "\n"; os << "loss: " << e.split.loss_chg << "\n"; os << "left_sum: " << e.split.left_sum << "\n"; os << "right_sum: " << e.split.right_sum << "\n"; return os; } }; inline static bool DepthWise(ExpandEntry lhs, ExpandEntry rhs) { if (lhs.depth == rhs.depth) { return lhs.timestamp > rhs.timestamp; // favor small timestamp } else { return lhs.depth > rhs.depth; // favor small depth } } inline static bool LossGuide(ExpandEntry lhs, ExpandEntry rhs) { if (lhs.split.loss_chg == rhs.split.loss_chg) { return lhs.timestamp > rhs.timestamp; // favor small timestamp } else { return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg } } // Find a gidx value for a given feature otherwise return -1 if not found __forceinline__ 
__device__ int BinarySearchRow( bst_uint begin, bst_uint end, common::CompressedIterator<uint32_t> data, int const fidx_begin, int const fidx_end) { bst_uint previous_middle = UINT32_MAX; while (end != begin) { auto middle = begin + (end - begin) / 2; if (middle == previous_middle) { break; } previous_middle = middle; auto gidx = data[middle]; if (gidx >= fidx_begin && gidx < fidx_end) { return gidx; } else if (gidx < fidx_begin) { begin = middle; } else { end = middle; } } // Value is missing return -1; } /** \brief Struct for accessing and manipulating an ellpack matrix on the * device. Does not own underlying memory and may be trivially copied into * kernels.*/ struct ELLPackMatrix { common::Span<uint32_t> feature_segments; /*! \brief minimum value for each feature. */ common::Span<bst_float> min_fvalue; /*! \brief Cut. */ common::Span<bst_float> gidx_fvalue_map; /*! \brief row length for ELLPack. */ size_t row_stride{0}; common::CompressedIterator<uint32_t> gidx_iter; bool is_dense; int null_gidx_value; XGBOOST_DEVICE size_t BinCount() const { return gidx_fvalue_map.size(); } // Get a matrix element, uses binary search for look up // Return NaN if missing __device__ bst_float GetElement(size_t ridx, size_t fidx) const { auto row_begin = row_stride * ridx; auto row_end = row_begin + row_stride; auto gidx = -1; if (is_dense) { gidx = gidx_iter[row_begin + fidx]; } else { gidx = BinarySearchRow(row_begin, row_end, gidx_iter, feature_segments[fidx], feature_segments[fidx + 1]); } if (gidx == -1) { return nan(""); } return gidx_fvalue_map[gidx]; } void Init(common::Span<uint32_t> feature_segments, common::Span<bst_float> min_fvalue, common::Span<bst_float> gidx_fvalue_map, size_t row_stride, common::CompressedIterator<uint32_t> gidx_iter, bool is_dense, int null_gidx_value) { this->feature_segments = feature_segments; this->min_fvalue = min_fvalue; this->gidx_fvalue_map = gidx_fvalue_map; this->row_stride = row_stride; this->gidx_iter = gidx_iter; this->is_dense = is_dense; this->null_gidx_value = null_gidx_value; } }; // With constraints template <typename GradientPairT> XGBOOST_DEVICE float inline LossChangeMissing( const GradientPairT& scan, const GradientPairT& missing, const GradientPairT& parent_sum, const float& parent_gain, const GPUTrainingParam& param, int constraint, const ValueConstraint& value_constraint, bool& missing_left_out) { // NOLINT float missing_left_gain = value_constraint.CalcSplitGain( param, constraint, GradStats(scan + missing), GradStats(parent_sum - (scan + missing))); float missing_right_gain = value_constraint.CalcSplitGain( param, constraint, GradStats(scan), GradStats(parent_sum - scan)); if (missing_left_gain >= missing_right_gain) { missing_left_out = true; return missing_left_gain - parent_gain; } else { missing_left_out = false; return missing_right_gain - parent_gain; } } /*! * \brief * * \tparam ReduceT BlockReduce Type. * \tparam TempStorage Cub Shared memory * * \param begin * \param end * \param temp_storage Shared memory for intermediate result. 
*/ template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT, typename GradientSumT> __device__ GradientSumT ReduceFeature(common::Span<const GradientSumT> feature_histogram, TempStorageT* temp_storage) { __shared__ cub::Uninitialized<GradientSumT> uninitialized_sum; GradientSumT& shared_sum = uninitialized_sum.Alias(); GradientSumT local_sum = GradientSumT(); // For loop sums features into one block size auto begin = feature_histogram.data(); auto end = begin + feature_histogram.size(); for (auto itr = begin; itr < end; itr += BLOCK_THREADS) { bool thread_active = itr + threadIdx.x < end; // Scan histogram GradientSumT bin = thread_active ? *(itr + threadIdx.x) : GradientSumT(); local_sum += bin; } local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, cub::Sum()); // Reduction result is stored in thread 0. if (threadIdx.x == 0) { shared_sum = local_sum; } __syncthreads(); return shared_sum; } /*! \brief Find the thread with best gain. */ template <int BLOCK_THREADS, typename ReduceT, typename ScanT, typename MaxReduceT, typename TempStorageT, typename GradientSumT> __device__ void EvaluateFeature( int fidx, common::Span<const GradientSumT> node_histogram, const ELLPackMatrix& matrix, DeviceSplitCandidate* best_split, // shared memory storing best split const DeviceNodeStats& node, const GPUTrainingParam& param, TempStorageT* temp_storage, // temp memory for cub operations int constraint, // monotonic_constraints const ValueConstraint& value_constraint) { // Use pointer from cut to indicate begin and end of bins for each feature. uint32_t gidx_begin = matrix.feature_segments[fidx]; // begining bin uint32_t gidx_end = matrix.feature_segments[fidx + 1]; // end bin for i^th feature // Sum histogram bins for current feature GradientSumT const feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT>( node_histogram.subspan(gidx_begin, gidx_end - gidx_begin), temp_storage); GradientSumT const parent_sum = GradientSumT(node.sum_gradients); GradientSumT const missing = parent_sum - feature_sum; float const null_gain = -std::numeric_limits<bst_float>::infinity(); SumCallbackOp<GradientSumT> prefix_op = SumCallbackOp<GradientSumT>(); for (int scan_begin = gidx_begin; scan_begin < gidx_end; scan_begin += BLOCK_THREADS) { bool thread_active = (scan_begin + threadIdx.x) < gidx_end; // Gradient value for current bin. GradientSumT bin = thread_active ? node_histogram[scan_begin + threadIdx.x] : GradientSumT(); ScanT(temp_storage->scan).ExclusiveScan(bin, bin, cub::Sum(), prefix_op); // Whether the gradient of missing values is put to the left side. bool missing_left = true; float gain = null_gain; if (thread_active) { gain = LossChangeMissing(bin, missing, parent_sum, node.root_gain, param, constraint, value_constraint, missing_left); } __syncthreads(); // Find thread with best gain cub::KeyValuePair<int, float> tuple(threadIdx.x, gain); cub::KeyValuePair<int, float> best = MaxReduceT(temp_storage->max_reduce).Reduce(tuple, cub::ArgMax()); __shared__ cub::KeyValuePair<int, float> block_max; if (threadIdx.x == 0) { block_max = best; } __syncthreads(); // Best thread updates split if (threadIdx.x == block_max.key) { int split_gidx = (scan_begin + threadIdx.x) - 1; float fvalue; if (split_gidx < static_cast<int>(gidx_begin)) { fvalue = matrix.min_fvalue[fidx]; } else { fvalue = matrix.gidx_fvalue_map[split_gidx]; } GradientSumT left = missing_left ? bin + missing : bin; GradientSumT right = parent_sum - left; best_split->Update(gain, missing_left ? 
kLeftDir : kRightDir, fvalue, fidx, GradientPair(left), GradientPair(right), param); } __syncthreads(); } } template <int BLOCK_THREADS, typename GradientSumT> __global__ void EvaluateSplitKernel( common::Span<const GradientSumT> node_histogram, // histogram for gradients common::Span<const int> feature_set, // Selected features DeviceNodeStats node, ELLPackMatrix matrix, GPUTrainingParam gpu_param, common::Span<DeviceSplitCandidate> split_candidates, // resulting split ValueConstraint value_constraint, common::Span<int> d_monotonic_constraints) { // KeyValuePair here used as threadIdx.x -> gain_value using ArgMaxT = cub::KeyValuePair<int, float>; using BlockScanT = cub::BlockScan<GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>; using MaxReduceT = cub::BlockReduce<ArgMaxT, BLOCK_THREADS>; using SumReduceT = cub::BlockReduce<GradientSumT, BLOCK_THREADS>; union TempStorage { typename BlockScanT::TempStorage scan; typename MaxReduceT::TempStorage max_reduce; typename SumReduceT::TempStorage sum_reduce; }; // Aligned && shared storage for best_split __shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split; DeviceSplitCandidate& best_split = uninitialized_split.Alias(); __shared__ TempStorage temp_storage; if (threadIdx.x == 0) { best_split = DeviceSplitCandidate(); } __syncthreads(); // One block for each feature. Features are sampled, so fidx != blockIdx.x int fidx = feature_set[blockIdx.x]; int constraint = d_monotonic_constraints[fidx]; EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>( fidx, node_histogram, matrix, &best_split, node, gpu_param, &temp_storage, constraint, value_constraint); __syncthreads(); if (threadIdx.x == 0) { // Record best loss for each feature split_candidates[blockIdx.x] = best_split; } } /** * \struct DeviceHistogram * * \summary Data storage for node histograms on device. Automatically expands. * * \tparam GradientSumT histogram entry type. * \tparam kStopGrowingSize Do not grow beyond this size * * \author Rory * \date 28/07/2018 */ template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26> class DeviceHistogram { private: /*! \brief Map nidx to starting index of its histogram. */ std::map<int, size_t> nidx_map_; dh::device_vector<typename GradientSumT::ValueT> data_; int n_bins_; int device_id_; static constexpr size_t kNumItemsInGradientSum = sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT); static_assert(kNumItemsInGradientSum == 2, "Number of items in gradient type should be 2."); public: void Init(int device_id, int n_bins) { this->n_bins_ = n_bins; this->device_id_ = device_id; } void Reset() { dh::safe_cuda(cudaMemsetAsync( data_.data().get(), 0, data_.size() * sizeof(typename decltype(data_)::value_type))); nidx_map_.clear(); } bool HistogramExists(int nidx) const { return nidx_map_.find(nidx) != nidx_map_.cend(); } size_t HistogramSize() const { return n_bins_ * kNumItemsInGradientSum; } dh::device_vector<typename GradientSumT::ValueT>& Data() { return data_; } void AllocateHistogram(int nidx) { if (HistogramExists(nidx)) return; // Number of items currently used in data const size_t used_size = nidx_map_.size() * HistogramSize(); const size_t new_used_size = used_size + HistogramSize(); dh::safe_cuda(cudaSetDevice(device_id_)); if (data_.size() >= kStopGrowingSize) { // Recycle histogram memory if (new_used_size <= data_.size()) { // no need to remove old node, just insert the new one. 
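        // Once the backing buffer has reached kStopGrowingSize it is not grown
        // any further: a new node either takes unused space at the end of the
        // buffer (this branch) or, below, evicts the entry with the smallest
        // node index and reuses its zeroed slot.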
nidx_map_[nidx] = used_size; // memset histogram size in bytes dh::safe_cuda(cudaMemsetAsync(data_.data().get() + used_size, 0, n_bins_ * sizeof(GradientSumT))); } else { std::pair<int, size_t> old_entry = *nidx_map_.begin(); nidx_map_.erase(old_entry.first); dh::safe_cuda(cudaMemsetAsync(data_.data().get() + old_entry.second, 0, n_bins_ * sizeof(GradientSumT))); nidx_map_[nidx] = old_entry.second; } } else { // Append new node histogram nidx_map_[nidx] = used_size; size_t new_required_memory = std::max(data_.size() * 2, HistogramSize()); if (data_.size() < new_required_memory) { data_.resize(new_required_memory); } } } /** * \summary Return pointer to histogram memory for a given node. * \param nidx Tree node index. * \return hist pointer. */ common::Span<GradientSumT> GetNodeHistogram(int nidx) { CHECK(this->HistogramExists(nidx)); auto ptr = data_.data().get() + nidx_map_[nidx]; return common::Span<GradientSumT>( reinterpret_cast<GradientSumT*>(ptr), n_bins_); } }; struct CalcWeightTrainParam { float min_child_weight; float reg_alpha; float reg_lambda; float max_delta_step; float learning_rate; XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p) : min_child_weight(p.min_child_weight), reg_alpha(p.reg_alpha), reg_lambda(p.reg_lambda), max_delta_step(p.max_delta_step), learning_rate(p.learning_rate) {} }; // Bin each input data entry, store the bin indices in compressed form. template<typename std::enable_if<true, int>::type = 0> __global__ void CompressBinEllpackKernel( common::CompressedBufferWriter wr, common::CompressedByteT* __restrict__ buffer, // gidx_buffer const size_t* __restrict__ row_ptrs, // row offset of input data const Entry* __restrict__ entries, // One batch of input data const float* __restrict__ cuts, // HistogramCuts::cut const uint32_t* __restrict__ cut_rows, // HistogramCuts::row_ptrs size_t base_row, // batch_row_begin size_t n_rows, size_t row_stride, unsigned int null_gidx_value) { size_t irow = threadIdx.x + blockIdx.x * blockDim.x; int ifeature = threadIdx.y + blockIdx.y * blockDim.y; if (irow >= n_rows || ifeature >= row_stride) { return; } int row_length = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]); unsigned int bin = null_gidx_value; if (ifeature < row_length) { Entry entry = entries[row_ptrs[irow] - row_ptrs[0] + ifeature]; int feature = entry.index; float fvalue = entry.fvalue; // {feature_cuts, ncuts} forms the array of cuts of `feature'. const float *feature_cuts = &cuts[cut_rows[feature]]; int ncuts = cut_rows[feature + 1] - cut_rows[feature]; // Assigning the bin in current entry. // S.t.: fvalue < feature_cuts[bin] bin = dh::UpperBound(feature_cuts, ncuts, fvalue); if (bin >= ncuts) { bin = ncuts - 1; } // Add the number of bins in previous features. bin += cut_rows[feature]; } // Write to gidx buffer. 
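  // Each thread handles one (row, feature-slot) pair; the bin index computed
  // above is written atomically into the compressed buffer at position
  // (irow + base_row) * row_stride + ifeature.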
wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature); } template <typename GradientSumT> __global__ void SharedMemHistKernel(ELLPackMatrix matrix, common::Span<const RowPartitioner::RowIndexT> d_ridx, GradientSumT* d_node_hist, const GradientPair* d_gpair, size_t n_elements, bool use_shared_memory_histograms) { extern __shared__ char smem[]; GradientSumT* smem_arr = reinterpret_cast<GradientSumT*>(smem); // NOLINT if (use_shared_memory_histograms) { dh::BlockFill(smem_arr, matrix.BinCount(), GradientSumT()); __syncthreads(); } for (auto idx : dh::GridStrideRange(static_cast<size_t>(0), n_elements)) { int ridx = d_ridx[idx / matrix.row_stride ]; int gidx = matrix.gidx_iter[ridx * matrix.row_stride + idx % matrix.row_stride]; if (gidx != matrix.null_gidx_value) { // If we are not using shared memory, accumulate the values directly into // global memory GradientSumT* atomic_add_ptr = use_shared_memory_histograms ? smem_arr : d_node_hist; AtomicAddGpair(atomic_add_ptr + gidx, d_gpair[ridx]); } } if (use_shared_memory_histograms) { // Write shared memory back to global memory __syncthreads(); for (auto i : dh::BlockStrideRange(static_cast<size_t>(0), matrix.BinCount())) { AtomicAddGpair(d_node_hist + i, smem_arr[i]); } } } // Instances of this type are created while creating the histogram bins for the // entire dataset across multiple sparse page batches. This keeps track of the number // of rows to process from a batch and the position from which to process on each device. struct RowStateOnDevice { // Number of rows assigned to this device const size_t total_rows_assigned_to_device; // Number of rows processed thus far size_t total_rows_processed; // Number of rows to process from the current sparse page batch size_t rows_to_process_from_batch; // Offset from the current sparse page batch to begin processing size_t row_offset_in_current_batch; explicit RowStateOnDevice(size_t total_rows) : total_rows_assigned_to_device(total_rows), total_rows_processed(0), rows_to_process_from_batch(0), row_offset_in_current_batch(0) { } explicit RowStateOnDevice(size_t total_rows, size_t batch_rows) : total_rows_assigned_to_device(total_rows), total_rows_processed(0), rows_to_process_from_batch(batch_rows), row_offset_in_current_batch(0) { } // Advance the row state by the number of rows processed void Advance() { total_rows_processed += rows_to_process_from_batch; CHECK_LE(total_rows_processed, total_rows_assigned_to_device); rows_to_process_from_batch = row_offset_in_current_batch = 0; } }; // Manage memory for a single GPU template <typename GradientSumT> struct DeviceShard { int n_bins; int device_id; int shard_idx; // Position in the local array of shards dh::BulkAllocator ba; ELLPackMatrix ellpack_matrix; std::unique_ptr<RowPartitioner> row_partitioner; DeviceHistogram<GradientSumT> hist; /*! \brief row_ptr form HistogramCuts. */ common::Span<uint32_t> feature_segments; /*! \brief minimum value for each feature. */ common::Span<bst_float> min_fvalue; /*! \brief Cut. */ common::Span<bst_float> gidx_fvalue_map; /*! \brief global index of histogram, which is stored in ELLPack format. */ common::Span<common::CompressedByteT> gidx_buffer; /*! \brief Gradient pair for each row. */ common::Span<GradientPair> gpair; common::Span<int> monotone_constraints; common::Span<bst_float> prediction_cache; /*! \brief Sum gradient for each node. */ std::vector<GradientPair> node_sum_gradients; common::Span<GradientPair> node_sum_gradients_d; /*! The row offset for this shard. 
*/ bst_uint row_begin_idx; bst_uint row_end_idx; bst_uint n_rows; TrainParam param; bool prediction_cache_initialised; bool use_shared_memory_histograms {false}; dh::CubMemory temp_memory; dh::PinnedMemory pinned_memory; std::vector<cudaStream_t> streams; common::Monitor monitor; std::vector<ValueConstraint> node_value_constraints; common::ColumnSampler column_sampler; FeatureInteractionConstraint interaction_constraints; using ExpandQueue = std::priority_queue<ExpandEntry, std::vector<ExpandEntry>, std::function<bool(ExpandEntry, ExpandEntry)>>; std::unique_ptr<ExpandQueue> qexpand; DeviceShard(int _device_id, int shard_idx, bst_uint row_begin, bst_uint row_end, TrainParam _param, uint32_t column_sampler_seed, uint32_t n_features) : device_id(_device_id), shard_idx(shard_idx), row_begin_idx(row_begin), row_end_idx(row_end), n_rows(row_end - row_begin), n_bins(0), param(std::move(_param)), prediction_cache_initialised(false), column_sampler(column_sampler_seed), interaction_constraints(param, n_features) { monitor.Init(std::string("DeviceShard") + std::to_string(device_id)); } void InitCompressedData( const common::HistogramCuts& hmat, size_t row_stride, bool is_dense); void CreateHistIndices( const SparsePage &row_batch, const common::HistogramCuts &hmat, const RowStateOnDevice &device_row_state, int rows_per_batch); ~DeviceShard() { dh::safe_cuda(cudaSetDevice(device_id)); for (auto& stream : streams) { dh::safe_cuda(cudaStreamDestroy(stream)); } } // Get vector of at least n initialised streams std::vector<cudaStream_t>& GetStreams(int n) { if (n > streams.size()) { for (auto& stream : streams) { dh::safe_cuda(cudaStreamDestroy(stream)); } streams.clear(); streams.resize(n); for (auto& stream : streams) { dh::safe_cuda(cudaStreamCreate(&stream)); } } return streams; } // Reset values for each update iteration // Note that the column sampler must be passed by value because it is not // thread safe void Reset(HostDeviceVector<GradientPair>* dh_gpair, int64_t num_columns) { if (param.grow_policy == TrainParam::kLossGuide) { qexpand.reset(new ExpandQueue(LossGuide)); } else { qexpand.reset(new ExpandQueue(DepthWise)); } this->column_sampler.Init(num_columns, param.colsample_bynode, param.colsample_bylevel, param.colsample_bytree); dh::safe_cuda(cudaSetDevice(device_id)); this->interaction_constraints.Reset(); std::fill(node_sum_gradients.begin(), node_sum_gradients.end(), GradientPair()); row_partitioner.reset(); // Release the device memory first before reallocating row_partitioner.reset(new RowPartitioner(device_id, n_rows)); dh::safe_cuda(cudaMemcpyAsync( gpair.data(), dh_gpair->ConstDevicePointer(device_id), gpair.size() * sizeof(GradientPair), cudaMemcpyHostToHost)); SubsampleGradientPair(device_id, gpair, param.subsample, row_begin_idx); hist.Reset(); } std::vector<DeviceSplitCandidate> EvaluateSplits( std::vector<int> nidxs, const RegTree& tree, size_t num_columns) { dh::safe_cuda(cudaSetDevice(device_id)); auto result_all = pinned_memory.GetSpan<DeviceSplitCandidate>(nidxs.size()); // Work out cub temporary memory requirement GPUTrainingParam gpu_param(param); DeviceSplitCandidateReduceOp op(gpu_param); size_t temp_storage_bytes = 0; DeviceSplitCandidate*dummy = nullptr; cub::DeviceReduce::Reduce( nullptr, temp_storage_bytes, dummy, dummy, num_columns, op, DeviceSplitCandidate()); // size in terms of DeviceSplitCandidate size_t cub_memory_size = std::ceil(static_cast<double>(temp_storage_bytes) / sizeof(DeviceSplitCandidate)); // Allocate enough temporary memory // Result for each 
nidx // + intermediate result for each column // + cub reduce memory auto temp_span = temp_memory.GetSpan<DeviceSplitCandidate>( nidxs.size() + nidxs.size() * num_columns +cub_memory_size*nidxs.size()); auto d_result_all = temp_span.subspan(0, nidxs.size()); auto d_split_candidates_all = temp_span.subspan(d_result_all.size(), nidxs.size() * num_columns); auto d_cub_memory_all = temp_span.subspan(d_result_all.size() + d_split_candidates_all.size(), cub_memory_size * nidxs.size()); auto& streams = this->GetStreams(nidxs.size()); for (auto i = 0ull; i < nidxs.size(); i++) { auto nidx = nidxs[i]; auto p_feature_set = column_sampler.GetFeatureSet(tree.GetDepth(nidx)); p_feature_set->Shard(GPUSet(device_id, 1)); auto d_sampled_features = p_feature_set->DeviceSpan(device_id); common::Span<int32_t> d_feature_set = interaction_constraints.Query(d_sampled_features, nidx); auto d_split_candidates = d_split_candidates_all.subspan(i * num_columns, d_feature_set.size()); DeviceNodeStats node(node_sum_gradients[nidx], nidx, param); auto d_result = d_result_all.subspan(i, 1); if (d_feature_set.size() == 0) { // Acting as a device side constructor for DeviceSplitCandidate. // DeviceSplitCandidate::IsValid is false so that ApplySplit can reject this // candidate. auto worst_candidate = DeviceSplitCandidate(); dh::safe_cuda(cudaMemcpyAsync(d_result.data(), &worst_candidate, sizeof(DeviceSplitCandidate), cudaMemcpyHostToDevice)); continue; } // One block for each feature int constexpr kBlockThreads = 256; EvaluateSplitKernel<kBlockThreads, GradientSumT> <<<uint32_t(d_feature_set.size()), kBlockThreads, 0, streams[i]>>>( hist.GetNodeHistogram(nidx), d_feature_set, node, ellpack_matrix, gpu_param, d_split_candidates, node_value_constraints[nidx], monotone_constraints); // Reduce over features to find best feature auto d_cub_memory = d_cub_memory_all.subspan(i * cub_memory_size, cub_memory_size); size_t cub_bytes = d_cub_memory.size() * sizeof(DeviceSplitCandidate); cub::DeviceReduce::Reduce(reinterpret_cast<void*>(d_cub_memory.data()), cub_bytes, d_split_candidates.data(), d_result.data(), d_split_candidates.size(), op, DeviceSplitCandidate(), streams[i]); } dh::safe_cuda(cudaMemcpy(result_all.data(), d_result_all.data(), sizeof(DeviceSplitCandidate) * d_result_all.size(), cudaMemcpyDeviceToHost)); return std::vector<DeviceSplitCandidate>(result_all.begin(), result_all.end()); } void BuildHist(int nidx) { hist.AllocateHistogram(nidx); auto d_node_hist = hist.GetNodeHistogram(nidx); auto d_ridx = row_partitioner->GetRows(nidx); auto d_gpair = gpair.data(); auto n_elements = d_ridx.size() * ellpack_matrix.row_stride; const size_t smem_size = use_shared_memory_histograms ? 
sizeof(GradientSumT) * ellpack_matrix.BinCount() : 0; const int items_per_thread = 8; const int block_threads = 256; const int grid_size = static_cast<int>( common::DivRoundUp(n_elements, items_per_thread * block_threads)); if (grid_size <= 0) { return; } SharedMemHistKernel<<<grid_size, block_threads, smem_size>>>( ellpack_matrix, d_ridx, d_node_hist.data(), d_gpair, n_elements, use_shared_memory_histograms); } void SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) { auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent); auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram); auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction); dh::LaunchN(device_id, n_bins, [=] __device__(size_t idx) { d_node_hist_subtraction[idx] = d_node_hist_parent[idx] - d_node_hist_histogram[idx]; }); } bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) { // Make sure histograms are already allocated hist.AllocateHistogram(nidx_subtraction); return hist.HistogramExists(nidx_histogram) && hist.HistogramExists(nidx_parent); } void UpdatePosition(int nidx, RegTree::Node split_node) { auto d_matrix = ellpack_matrix; row_partitioner->UpdatePosition( nidx, split_node.LeftChild(), split_node.RightChild(), [=] __device__(bst_uint ridx) { bst_float element = d_matrix.GetElement(ridx, split_node.SplitIndex()); // Missing value int new_position = 0; if (isnan(element)) { new_position = split_node.DefaultChild(); } else { if (element <= split_node.SplitCond()) { new_position = split_node.LeftChild(); } else { new_position = split_node.RightChild(); } } return new_position; }); } // After tree update is finished, update the position of all training // instances to their final leaf This information is used later to update the // prediction cache void FinalisePosition(RegTree* p_tree) { const auto d_nodes = temp_memory.GetSpan<RegTree::Node>(p_tree->GetNodes().size()); dh::safe_cuda(cudaMemcpy(d_nodes.data(), p_tree->GetNodes().data(), d_nodes.size() * sizeof(RegTree::Node), cudaMemcpyHostToDevice)); auto d_matrix = ellpack_matrix; row_partitioner->FinalisePosition( [=] __device__(bst_uint ridx, int position) { auto node = d_nodes[position]; while (!node.IsLeaf()) { bst_float element = d_matrix.GetElement(ridx, node.SplitIndex()); // Missing value if (isnan(element)) { position = node.DefaultChild(); } else { if (element <= node.SplitCond()) { position = node.LeftChild(); } else { position = node.RightChild(); } } node = d_nodes[position]; } return position; }); } void UpdatePredictionCache(bst_float* out_preds_d) { dh::safe_cuda(cudaSetDevice(device_id)); if (!prediction_cache_initialised) { dh::safe_cuda(cudaMemcpyAsync(prediction_cache.data(), out_preds_d, prediction_cache.size() * sizeof(bst_float), cudaMemcpyDefault)); } prediction_cache_initialised = true; CalcWeightTrainParam param_d(param); dh::safe_cuda( cudaMemcpyAsync(node_sum_gradients_d.data(), node_sum_gradients.data(), sizeof(GradientPair) * node_sum_gradients.size(), cudaMemcpyHostToDevice)); auto d_position = row_partitioner->GetPosition(); auto d_ridx = row_partitioner->GetRows(); auto d_node_sum_gradients = node_sum_gradients_d.data(); auto d_prediction_cache = prediction_cache.data(); dh::LaunchN( device_id, prediction_cache.size(), [=] __device__(int local_idx) { int pos = d_position[local_idx]; bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]); d_prediction_cache[d_ridx[local_idx]] += weight * param_d.learning_rate; }); dh::safe_cuda(cudaMemcpy( 
out_preds_d, prediction_cache.data(), prediction_cache.size() * sizeof(bst_float), cudaMemcpyDefault)); row_partitioner.reset(); } void AllReduceHist(int nidx, dh::AllReducer* reducer) { monitor.StartCuda("AllReduce"); auto d_node_hist = hist.GetNodeHistogram(nidx).data(); reducer->AllReduceSum( shard_idx, reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist), reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist), ellpack_matrix.BinCount() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT))); reducer->Synchronize(device_id); monitor.StopCuda("AllReduce"); } /** * \brief Build GPU local histograms for the left and right child of some parent node */ void BuildHistLeftRight(int nidx_parent, int nidx_left, int nidx_right, dh::AllReducer* reducer) { auto build_hist_nidx = nidx_left; auto subtraction_trick_nidx = nidx_right; auto left_node_rows = row_partitioner->GetRows(nidx_left).size(); auto right_node_rows = row_partitioner->GetRows(nidx_right).size(); // Decide whether to build the left histogram or right histogram // Find the largest number of training instances on any given Shard // Assume this will be the bottleneck and avoid building this node if // possible std::vector<size_t> max_reduce; max_reduce.push_back(left_node_rows); max_reduce.push_back(right_node_rows); reducer->HostMaxAllReduce(&max_reduce); bool fewer_right = max_reduce[1] < max_reduce[0]; if (fewer_right) { std::swap(build_hist_nidx, subtraction_trick_nidx); } this->BuildHist(build_hist_nidx); this->AllReduceHist(build_hist_nidx, reducer); // Check whether we can use the subtraction trick to calculate the other bool do_subtraction_trick = this->CanDoSubtractionTrick( nidx_parent, build_hist_nidx, subtraction_trick_nidx); if (do_subtraction_trick) { // Calculate other histogram using subtraction trick this->SubtractionTrick(nidx_parent, build_hist_nidx, subtraction_trick_nidx); } else { // Calculate other histogram manually this->BuildHist(subtraction_trick_nidx); this->AllReduceHist(subtraction_trick_nidx, reducer); } } void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) { RegTree& tree = *p_tree; GradStats left_stats; left_stats.Add(candidate.split.left_sum); GradStats right_stats; right_stats.Add(candidate.split.right_sum); GradStats parent_sum; parent_sum.Add(left_stats); parent_sum.Add(right_stats); node_value_constraints.resize(tree.GetNodes().size()); auto base_weight = node_value_constraints[candidate.nid].CalcWeight(param, parent_sum); auto left_weight = node_value_constraints[candidate.nid].CalcWeight(param, left_stats)*param.learning_rate; auto right_weight = node_value_constraints[candidate.nid].CalcWeight(param, right_stats)*param.learning_rate; tree.ExpandNode(candidate.nid, candidate.split.findex, candidate.split.fvalue, candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_sum.sum_hess); // Set up child constraints node_value_constraints.resize(tree.GetNodes().size()); node_value_constraints[candidate.nid].SetChild( param, tree[candidate.nid].SplitIndex(), left_stats, right_stats, &node_value_constraints[tree[candidate.nid].LeftChild()], &node_value_constraints[tree[candidate.nid].RightChild()]); node_sum_gradients[tree[candidate.nid].LeftChild()] = candidate.split.left_sum; node_sum_gradients[tree[candidate.nid].RightChild()] = candidate.split.right_sum; interaction_constraints.Split(candidate.nid, tree[candidate.nid].SplitIndex(), tree[candidate.nid].LeftChild(), tree[candidate.nid].RightChild()); } void 
InitRoot(RegTree* p_tree, HostDeviceVector<GradientPair>* gpair_all, dh::AllReducer* reducer, int64_t num_columns) { constexpr int kRootNIdx = 0; const auto &gpair = gpair_all->DeviceSpan(device_id); dh::SumReduction(temp_memory, gpair, node_sum_gradients_d, gpair.size()); reducer->AllReduceSum( shard_idx, reinterpret_cast<float*>(node_sum_gradients_d.data()), reinterpret_cast<float*>(node_sum_gradients_d.data()), 2); reducer->Synchronize(device_id); dh::safe_cuda(cudaMemcpy(node_sum_gradients.data(), node_sum_gradients_d.data(), sizeof(GradientPair), cudaMemcpyDeviceToHost)); this->BuildHist(kRootNIdx); this->AllReduceHist(kRootNIdx, reducer); // Remember root stats p_tree->Stat(kRootNIdx).sum_hess = node_sum_gradients[kRootNIdx].GetHess(); auto weight = CalcWeight(param, node_sum_gradients[kRootNIdx]); p_tree->Stat(kRootNIdx).base_weight = weight; (*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight); // Initialise root constraint node_value_constraints.resize(p_tree->GetNodes().size()); // Generate first split auto split = this->EvaluateSplits({kRootNIdx}, *p_tree, num_columns); qexpand->push( ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split.at(0), 0)); } void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat, RegTree* p_tree, dh::AllReducer* reducer) { auto& tree = *p_tree; monitor.StartCuda("Reset"); this->Reset(gpair_all, p_fmat->Info().num_col_); monitor.StopCuda("Reset"); monitor.StartCuda("InitRoot"); this->InitRoot(p_tree, gpair_all, reducer, p_fmat->Info().num_col_); monitor.StopCuda("InitRoot"); auto timestamp = qexpand->size(); auto num_leaves = 1; while (!qexpand->empty()) { ExpandEntry candidate = qexpand->top(); qexpand->pop(); if (!candidate.IsValid(param, num_leaves)) { continue; } this->ApplySplit(candidate, p_tree); num_leaves++; int left_child_nidx = tree[candidate.nid].LeftChild(); int right_child_nidx = tree[candidate.nid].RightChild(); // Only create child entries if needed if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx), num_leaves)) { monitor.StartCuda("UpdatePosition"); this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]); monitor.StopCuda("UpdatePosition"); monitor.StartCuda("BuildHist"); this->BuildHistLeftRight(candidate.nid, left_child_nidx, right_child_nidx, reducer); monitor.StopCuda("BuildHist"); monitor.StartCuda("EvaluateSplits"); auto splits = this->EvaluateSplits({left_child_nidx, right_child_nidx}, *p_tree, p_fmat->Info().num_col_); monitor.StopCuda("EvaluateSplits"); qexpand->push(ExpandEntry(left_child_nidx, tree.GetDepth(left_child_nidx), splits.at(0), timestamp++)); qexpand->push(ExpandEntry(right_child_nidx, tree.GetDepth(right_child_nidx), splits.at(1), timestamp++)); } } monitor.StartCuda("FinalisePosition"); this->FinalisePosition(p_tree); monitor.StopCuda("FinalisePosition"); } }; template <typename GradientSumT> inline void DeviceShard<GradientSumT>::InitCompressedData( const common::HistogramCuts &hmat, size_t row_stride, bool is_dense) { n_bins = hmat.Ptrs().back(); int null_gidx_value = hmat.Ptrs().back(); CHECK(!(param.max_leaves == 0 && param.max_depth == 0)) << "Max leaves and max depth cannot both be unconstrained for " "gpu_hist."; int max_nodes = param.max_leaves > 0 ? 
param.max_leaves * 2 : MaxNodesDepth(param.max_depth); ba.Allocate(device_id, &gpair, n_rows, &prediction_cache, n_rows, &node_sum_gradients_d, max_nodes, &feature_segments, hmat.Ptrs().size(), &gidx_fvalue_map, hmat.Values().size(), &min_fvalue, hmat.MinValues().size(), &monotone_constraints, param.monotone_constraints.size()); dh::CopyVectorToDeviceSpan(gidx_fvalue_map, hmat.Values()); dh::CopyVectorToDeviceSpan(min_fvalue, hmat.MinValues()); dh::CopyVectorToDeviceSpan(feature_segments, hmat.Ptrs()); dh::CopyVectorToDeviceSpan(monotone_constraints, param.monotone_constraints); node_sum_gradients.resize(max_nodes); // allocate compressed bin data int num_symbols = n_bins + 1; // Required buffer size for storing data matrix in ELLPack format. size_t compressed_size_bytes = common::CompressedBufferWriter::CalculateBufferSize(row_stride * n_rows, num_symbols); ba.Allocate(device_id, &gidx_buffer, compressed_size_bytes); thrust::fill( thrust::device_pointer_cast(gidx_buffer.data()), thrust::device_pointer_cast(gidx_buffer.data() + gidx_buffer.size()), 0); ellpack_matrix.Init( feature_segments, min_fvalue, gidx_fvalue_map, row_stride, common::CompressedIterator<uint32_t>(gidx_buffer.data(), num_symbols), is_dense, null_gidx_value); // check if we can use shared memory for building histograms // (assuming atleast we need 2 CTAs per SM to maintain decent latency // hiding) auto histogram_size = sizeof(GradientSumT) * hmat.Ptrs().back(); auto max_smem = dh::MaxSharedMemory(device_id); if (histogram_size <= max_smem) { use_shared_memory_histograms = true; } // Init histogram hist.Init(device_id, hmat.Ptrs().back()); } template <typename GradientSumT> inline void DeviceShard<GradientSumT>::CreateHistIndices( const SparsePage &row_batch, const common::HistogramCuts &hmat, const RowStateOnDevice &device_row_state, int rows_per_batch) { // Has any been allocated for me in this batch? if (!device_row_state.rows_to_process_from_batch) return; unsigned int null_gidx_value = hmat.Ptrs().back(); size_t row_stride = this->ellpack_matrix.row_stride; const auto &offset_vec = row_batch.offset.ConstHostVector(); int num_symbols = n_bins + 1; // bin and compress entries in batches of rows size_t gpu_batch_nrows = std::min( dh::TotalMemory(device_id) / (16 * row_stride * sizeof(Entry)), static_cast<size_t>(device_row_state.rows_to_process_from_batch)); const std::vector<Entry>& data_vec = row_batch.data.ConstHostVector(); size_t gpu_nbatches = common::DivRoundUp(device_row_state.rows_to_process_from_batch, gpu_batch_nrows); for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) { size_t batch_row_begin = gpu_batch * gpu_batch_nrows; size_t batch_row_end = (gpu_batch + 1) * gpu_batch_nrows; if (batch_row_end > device_row_state.rows_to_process_from_batch) { batch_row_end = device_row_state.rows_to_process_from_batch; } size_t batch_nrows = batch_row_end - batch_row_begin; const auto ent_cnt_begin = offset_vec[device_row_state.row_offset_in_current_batch + batch_row_begin]; const auto ent_cnt_end = offset_vec[device_row_state.row_offset_in_current_batch + batch_row_end]; /*! \brief row offset in SparsePage (the input data). */ dh::device_vector<size_t> row_ptrs(batch_nrows+1); thrust::copy( offset_vec.data() + device_row_state.row_offset_in_current_batch + batch_row_begin, offset_vec.data() + device_row_state.row_offset_in_current_batch + batch_row_end + 1, row_ptrs.begin()); // number of entries in this batch. 
size_t n_entries = ent_cnt_end - ent_cnt_begin; dh::device_vector<Entry> entries_d(n_entries); // copy data entries to device. dh::safe_cuda (cudaMemcpy (entries_d.data().get(), data_vec.data() + ent_cnt_begin, n_entries * sizeof(Entry), cudaMemcpyDefault)); const dim3 block3(32, 8, 1); // 256 threads const dim3 grid3(common::DivRoundUp(batch_nrows, block3.x), common::DivRoundUp(row_stride, block3.y), 1); CompressBinEllpackKernel<<<grid3, block3>>> (common::CompressedBufferWriter(num_symbols), gidx_buffer.data(), row_ptrs.data().get(), entries_d.data().get(), gidx_fvalue_map.data(), feature_segments.data(), device_row_state.total_rows_processed + batch_row_begin, batch_nrows, row_stride, null_gidx_value); } } // An instance of this type is created which keeps track of total number of rows to process, // rows processed thus far, rows to process and the offset from the current sparse page batch // to begin processing on each device class DeviceHistogramBuilderState { public: template <typename GradientSumT> explicit DeviceHistogramBuilderState( const std::vector<std::unique_ptr<DeviceShard<GradientSumT>>> &shards) { device_row_states_.reserve(shards.size()); for (const auto &shard : shards) { device_row_states_.push_back(RowStateOnDevice(shard->n_rows)); } } const RowStateOnDevice &GetRowStateOnDevice(int idx) const { return device_row_states_[idx]; } // This method is invoked at the beginning of each sparse page batch. This distributes // the rows in the sparse page to the different devices. // TODO(sriramch): Think of a way to utilize *all* the GPUs to build the compressed bins. void BeginBatch(const SparsePage &batch) { size_t rem_rows = batch.Size(); size_t row_offset_in_current_batch = 0; for (auto &device_row_state : device_row_states_) { // Do we have anymore left to process from this batch on this device? 
if (device_row_state.total_rows_assigned_to_device > device_row_state.total_rows_processed) { // There are still some rows that needs to be assigned to this device device_row_state.rows_to_process_from_batch = std::min( device_row_state.total_rows_assigned_to_device - device_row_state.total_rows_processed, rem_rows); } else { // All rows have been assigned to this device device_row_state.rows_to_process_from_batch = 0; } device_row_state.row_offset_in_current_batch = row_offset_in_current_batch; row_offset_in_current_batch += device_row_state.rows_to_process_from_batch; rem_rows -= device_row_state.rows_to_process_from_batch; } } // This method is invoked after completion of each sparse page batch void EndBatch() { for (auto &rs : device_row_states_) { rs.Advance(); } } private: std::vector<RowStateOnDevice> device_row_states_; }; template <typename GradientSumT> class GPUHistMakerSpecialised { public: GPUHistMakerSpecialised() : initialised_{false}, p_last_fmat_{nullptr} {} void Init(const std::vector<std::pair<std::string, std::string>>& args, LearnerTrainParam const* lparam) { param_.InitAllowUnknown(args); learner_param_ = lparam; hist_maker_param_.InitAllowUnknown(args); auto devices = GPUSet::All(learner_param_->gpu_id, learner_param_->n_gpus); n_devices_ = devices.Size(); CHECK(n_devices_ != 0) << "Must have at least one device"; dist_ = GPUDistribution::Block(devices); dh::CheckComputeCapability(); monitor_.Init("updater_gpu_hist"); } ~GPUHistMakerSpecialised() { dh::GlobalMemoryLogger().Log(); } void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) { monitor_.StartCuda("Update"); // rescale learning rate according to size of trees float lr = param_.learning_rate; param_.learning_rate = lr / trees.size(); ValueConstraint::Init(&param_, dmat->Info().num_col_); // build tree try { for (xgboost::RegTree* tree : trees) { this->UpdateTree(gpair, dmat, tree); } dh::safe_cuda(cudaGetLastError()); } catch (const std::exception& e) { LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl; } param_.learning_rate = lr; monitor_.StopCuda("Update"); } void InitDataOnce(DMatrix* dmat) { info_ = &dmat->Info(); int n_devices = dist_.Devices().Size(); device_list_.resize(n_devices); for (int index = 0; index < n_devices; ++index) { int device_id = dist_.Devices().DeviceId(index); device_list_[index] = device_id; } reducer_.Init(device_list_); // Synchronise the column sampling seed uint32_t column_sampling_seed = common::GlobalRandom()(); rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0); // Create device shards shards_.resize(n_devices); dh::ExecuteIndexShards( &shards_, [&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) { dh::safe_cuda(cudaSetDevice(dist_.Devices().DeviceId(idx))); size_t start = dist_.ShardStart(info_->num_row_, idx); size_t size = dist_.ShardSize(info_->num_row_, idx); shard = std::unique_ptr<DeviceShard<GradientSumT>>( new DeviceShard<GradientSumT>(dist_.Devices().DeviceId(idx), idx, start, start + size, param_, column_sampling_seed, info_->num_col_)); }); monitor_.StartCuda("Quantiles"); // Create the quantile sketches for the dmatrix and initialize HistogramCuts size_t row_stride = common::DeviceSketch(param_, *learner_param_, hist_maker_param_.gpu_batch_nrows, dmat, &hmat_); monitor_.StopCuda("Quantiles"); n_bins_ = hmat_.Ptrs().back(); auto is_dense = info_->num_nonzero_ == info_->num_row_ * info_->num_col_; // Init global data for each shard 
monitor_.StartCuda("InitCompressedData"); dh::ExecuteIndexShards( &shards_, [&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) { dh::safe_cuda(cudaSetDevice(shard->device_id)); shard->InitCompressedData(hmat_, row_stride, is_dense); }); monitor_.StopCuda("InitCompressedData"); monitor_.StartCuda("BinningCompression"); DeviceHistogramBuilderState hist_builder_row_state(shards_); for (const auto &batch : dmat->GetRowBatches()) { hist_builder_row_state.BeginBatch(batch); dh::ExecuteIndexShards( &shards_, [&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) { dh::safe_cuda(cudaSetDevice(shard->device_id)); shard->CreateHistIndices(batch, hmat_, hist_builder_row_state.GetRowStateOnDevice(idx), hist_maker_param_.gpu_batch_nrows); }); hist_builder_row_state.EndBatch(); } monitor_.StopCuda("BinningCompression"); p_last_fmat_ = dmat; initialised_ = true; } void InitData(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat) { if (!initialised_) { monitor_.StartCuda("InitDataOnce"); this->InitDataOnce(dmat); monitor_.StopCuda("InitDataOnce"); } } // Only call this method for testing void CheckTreesSynchronized(const std::vector<RegTree>& local_trees) const { std::string s_model; common::MemoryBufferStream fs(&s_model); int rank = rabit::GetRank(); if (rank == 0) { local_trees.front().Save(&fs); } fs.Seek(0); rabit::Broadcast(&s_model, 0); RegTree reference_tree; reference_tree.Load(&fs); for (const auto& tree : local_trees) { CHECK(tree == reference_tree); } } void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, RegTree* p_tree) { monitor_.StartCuda("InitData"); this->InitData(gpair, p_fmat); monitor_.StopCuda("InitData"); std::vector<RegTree> trees(shards_.size()); for (auto& tree : trees) { tree = *p_tree; } gpair->Reshard(dist_); // Launch one thread for each device "shard" containing a subset of rows. // Threads will cooperatively build the tree, synchronising over histograms. // Each thread will redundantly build its own copy of the tree dh::ExecuteIndexShards( &shards_, [&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) { shard->UpdateTree(gpair, p_fmat, &trees.at(idx), &reducer_); }); // All trees are expected to be identical if (hist_maker_param_.debug_synchronize) { this->CheckTreesSynchronized(trees); } // Write the output tree *p_tree = trees.front(); } bool UpdatePredictionCache( const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) { if (shards_.empty() || p_last_fmat_ == nullptr || p_last_fmat_ != data) { return false; } monitor_.StartCuda("UpdatePredictionCache"); p_out_preds->Shard(dist_.Devices()); dh::ExecuteIndexShards( &shards_, [&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) { dh::safe_cuda(cudaSetDevice(shard->device_id)); shard->UpdatePredictionCache( p_out_preds->DevicePointer(shard->device_id)); }); monitor_.StopCuda("UpdatePredictionCache"); return true; } TrainParam param_; // NOLINT common::HistogramCuts hmat_; // NOLINT MetaInfo* info_; // NOLINT std::vector<std::unique_ptr<DeviceShard<GradientSumT>>> shards_; // NOLINT private: bool initialised_; int n_devices_; int n_bins_; GPUHistMakerTrainParam hist_maker_param_; LearnerTrainParam const* learner_param_; dh::AllReducer reducer_; DMatrix* p_last_fmat_; GPUDistribution dist_; common::Monitor monitor_; /*! List storing device id. 
*/ std::vector<int> device_list_; }; class GPUHistMaker : public TreeUpdater { public: void Init( const std::vector<std::pair<std::string, std::string>>& args) override { hist_maker_param_.InitAllowUnknown(args); float_maker_.reset(); double_maker_.reset(); if (hist_maker_param_.single_precision_histogram) { float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>()); float_maker_->Init(args, tparam_); } else { double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>()); double_maker_->Init(args, tparam_); } } void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) override { if (hist_maker_param_.single_precision_histogram) { float_maker_->Update(gpair, dmat, trees); } else { double_maker_->Update(gpair, dmat, trees); } } bool UpdatePredictionCache( const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override { if (hist_maker_param_.single_precision_histogram) { return float_maker_->UpdatePredictionCache(data, p_out_preds); } else { return double_maker_->UpdatePredictionCache(data, p_out_preds); } } private: GPUHistMakerTrainParam hist_maker_param_; std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_; std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_; }; #if !defined(GTEST_TEST) XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist") .describe("Grow tree with GPU.") .set_body([]() { return new GPUHistMaker(); }); #endif // !defined(GTEST_TEST) } // namespace tree } // namespace xgboost
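For readers skimming the gpu_hist code above: the shared-memory decision in InitCompressedData reduces to comparing one node's histogram footprint against the device's per-block shared memory. A minimal host-side sketch of that arithmetic, using assumed bin counts and a typical 48 KB budget rather than values taken from this file:

#include <cstdio>
#include <cstddef>

int main() {
  // Hypothetical sizes, not from the source: 100 features x 256 bins,
  // and a gradient pair stored as two doubles (grad, hess).
  const std::size_t n_bins = 100 * 256;
  const std::size_t entry_bytes = 2 * sizeof(double);
  const std::size_t histogram_bytes = n_bins * entry_bytes;
  const std::size_t max_smem = 48 * 1024;  // common per-block shared memory budget
  std::printf("histogram needs %zu bytes, budget is %zu -> %s\n",
              histogram_bytes, max_smem,
              histogram_bytes <= max_smem ? "shared-memory histograms"
                                          : "global-memory histograms");
  return 0;
}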
ab5a3cabe1fe5d11d9f006691a0f53d981c8aa53.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <stdio.h> #include<stdlib.h> #include <math.h> #include <time.h> #include<valarray> using namespace std; int nsamples = 6000; float float_rand( float min, float max ) { return min + (rand() / (RAND_MAX/(max-min)) ); } __global__ void solver(float* res, const int n, const float* rand_x, const float* rand_y) { int tid = threadIdx.x + blockIdx.x*blockDim.x; float x,y; float val = 0.0; for (int i = 0; i < n; i++) { x = rand_x[i]; y = rand_y[i]; val += expf(-x*x-y*y); } __syncthreads(); res[tid] = val/n/(1.0/16.0); } int main() { int blocks = 300; int threads = 300; float h_res[threads*blocks]; float *d_res; float *h_x = new float[nsamples]; float *h_y = new float[nsamples]; srand(time(NULL)); for (int i = 0; i < nsamples; i++) { h_x[i] = float_rand(-2, 2); h_y[i] = float_rand(-2, 2); } float *d_x, *d_y; hipMalloc(&d_res, threads*blocks*sizeof(float)); hipMalloc(&d_x, nsamples*sizeof(float)); hipMalloc(&d_y, nsamples*sizeof(float)); hipMemcpy(d_x, h_x, nsamples*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_y, h_y, nsamples*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( solver), dim3(threads),dim3(blocks), 0, 0, d_res, nsamples, d_x, d_y); hipMemcpy(h_res, d_res, threads*blocks*sizeof(float), hipMemcpyDeviceToHost); valarray<float> myvalarray(h_res,threads*blocks); float result = myvalarray.sum()/float(threads*blocks); cout<<"Result: "<<result<<endl; hipFree(d_res); hipFree(d_x); hipFree(d_y); delete[] h_x; delete[] h_y; return 0; }
ab5a3cabe1fe5d11d9f006691a0f53d981c8aa53.cu
#include <iostream> #include <stdio.h> #include<stdlib.h> #include <math.h> #include <time.h> #include<valarray> using namespace std; int nsamples = 6000; float float_rand( float min, float max ) { return min + (rand() / (RAND_MAX/(max-min)) ); } __global__ void solver(float* res, const int n, const float* rand_x, const float* rand_y) { int tid = threadIdx.x + blockIdx.x*blockDim.x; float x,y; float val = 0.0; for (int i = 0; i < n; i++) { x = rand_x[i]; y = rand_y[i]; val += expf(-x*x-y*y); } __syncthreads(); res[tid] = val/n/(1.0/16.0); } int main() { int blocks = 300; int threads = 300; float h_res[threads*blocks]; float *d_res; float *h_x = new float[nsamples]; float *h_y = new float[nsamples]; srand(time(NULL)); for (int i = 0; i < nsamples; i++) { h_x[i] = float_rand(-2, 2); h_y[i] = float_rand(-2, 2); } float *d_x, *d_y; cudaMalloc(&d_res, threads*blocks*sizeof(float)); cudaMalloc(&d_x, nsamples*sizeof(float)); cudaMalloc(&d_y, nsamples*sizeof(float)); cudaMemcpy(d_x, h_x, nsamples*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_y, h_y, nsamples*sizeof(float), cudaMemcpyHostToDevice); solver<<<threads,blocks>>>(d_res, nsamples, d_x, d_y); cudaMemcpy(h_res, d_res, threads*blocks*sizeof(float), cudaMemcpyDeviceToHost); valarray<float> myvalarray(h_res,threads*blocks); float result = myvalarray.sum()/float(threads*blocks); cout<<"Result: "<<result<<endl; cudaFree(d_res); cudaFree(d_x); cudaFree(d_y); delete[] h_x; delete[] h_y; return 0; }
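As a sanity check on the pair above (my addition, not part of either file): the kernel's per-thread average of expf(-x*x-y*y), scaled by the domain area 16, is a Monte Carlo estimate of the integral of exp(-x^2-y^2) over [-2,2] x [-2,2], whose analytic value is (sqrt(pi)*erf(2))^2, about 3.11. A small CPU-only reference:

#include <cmath>
#include <cstdio>

int main() {
  const double pi = std::acos(-1.0);
  // Closed form: (integral of exp(-t^2) dt over [-2,2])^2 = (sqrt(pi) * erf(2))^2.
  const double exact = std::pow(std::sqrt(pi) * std::erf(2.0), 2.0);
  std::printf("analytic value: %.4f\n", exact);  // ~3.1124; the GPU "Result" should be close
  return 0;
}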
7172781d2e8fd5185da835de94c0d3544b382d6f.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2020 Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" FusedOp::FusedOp(FFModel& model, Op* op) : Op(model, OP_FUSED, op->name, 0) { numInputs = op->numInputs; for (int i = 0; i < numInputs; i++) { inputs[i] = op->inputs[i]; input_lps[i] = op->input_lps[i]; input_grad_lps[i] = op->input_grad_lps[i]; } numWeights = op->numWeights; for (int i = 0; i < numWeights; i++) { weights[i] = op->weights[i]; weights[i].owner_op = this; weights[i].owner_idx = i; } numOutputs = op->numOutputs; for (int i = 0; i < numOutputs; i++) { outputs[i] = op->outputs[i]; outputs[i].owner_op = this; outputs[i].owner_idx = i; } numOperators = 1; op_num_inputs[0] = numInputs; op_num_weights[0] = numWeights; op_num_outputs[0] = numOutputs; op_op_type[0] = op->op_type; operators[0] = op; for (int i = 0; i < numInputs; i++) { op_input_source[i] = SOURCE_INPUT; op_input_idx[i] = i; } for (int i = 0; i < numWeights; i++) { op_weight_source[i] = SOURCE_WEIGHT; op_weight_idx[i] = i; } for (int i = 0; i < numOutputs; i++) { op_output_source[i] = SOURCE_OUTPUT; op_output_idx[i] = i; } task_is = op->task_is; } bool FusedOp::add_operator(FFModel& model, Op* op) { Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; // Currently assume fusion optimization is performed // after create_weights and create_outputs // So task_is and op->task_is are not empty Domain my_domain = runtime->get_index_space_domain(ctx, task_is); Domain op_domain = runtime->get_index_space_domain(ctx, op->task_is); ParallelConfig my_config, op_config; assert(model.config.find_parallel_config(my_domain.get_dim(), name, my_config)); assert(model.config.find_parallel_config(op_domain.get_dim(), op->name, op_config)); if (my_config == op_config) { // Do nothing } else { return false; } int input_offset = 0, weight_offset = 0, output_offset = 0; for (int i = 0; i < numOperators; i++) { input_offset += op_num_inputs[i]; weight_offset += op_num_weights[i]; output_offset += op_num_outputs[i]; } if ((input_offset + op->numInputs > MAX_NUM_FUSED_TENSORS) || (weight_offset + op->numWeights > MAX_NUM_FUSED_TENSORS) || (output_offset + op->numOutputs > MAX_NUM_FUSED_TENSORS)) { fprintf(stderr, "Cannot fuse. Consider increase MAX_NUM_FUSED_TENSORS\n"); return false; } if (numOperators + 1 > MAX_NUM_FUSED_OPERATORS) { fprintf(stderr, "Reach to the fusion limit. 
Consider increase MAX_NUM_FUSED_OPERATORS"); return false; } // Set inputs for (int i = 0; i < op->numInputs; i++) { bool found = false; for (int j = 0; j < input_offset; j++) if (inputs[j].region == op->inputs[i].region) { // This input is one of my inputs assert(!found); assert(inputs[j].region != LogicalRegion::NO_REGION); op_input_source[input_offset + i] = SOURCE_INPUT; op_input_idx[input_offset + i] = j; found = true; break; } for (int j = 0; j < output_offset; j++) if (outputs[j].region == op->inputs[i].region) { // This input is one of my outputs assert(!found); assert(outputs[j].region != LogicalRegion::NO_REGION); op_input_source[input_offset + i] = SOURCE_OUTPUT; op_input_idx[input_offset + i] = j; found = true; break; } if (found) { // Do nothing } else { inputs[numInputs] = op->inputs[i]; input_lps[numInputs] = op->input_lps[i]; input_grad_lps[numInputs] = op->input_grad_lps[i]; op_input_source[input_offset+i] = SOURCE_INPUT; op_input_idx[input_offset+i] = numInputs; numInputs += 1; } } // Set weights for (int i = 0; i < op->numWeights; i++) { bool found = false; for (int j = 0; j < numWeights; j++) if (weights[j].region == op->weights[i].region) { assert(!found); assert(weights[j].region != LogicalRegion::NO_REGION); op_weight_source[weight_offset + i] = SOURCE_WEIGHT; op_weight_idx[weight_offset + i] = j; found = true; break; } if (found) { // Do nothing } else { weights[numWeights] = op->weights[i]; weights[numWeights].owner_op = this; weights[numWeights].owner_idx = numWeights; op_weight_source[weight_offset+i] = SOURCE_WEIGHT; op_weight_idx[weight_offset+i] = numWeights; numWeights += 1; } } // Set outputs for (int i = 0; i < op->numOutputs; i++) { outputs[numOutputs] = op->outputs[i]; outputs[numOutputs].owner_op = this; outputs[numOutputs].owner_idx = numOutputs; op_output_source[output_offset+i] = SOURCE_OUTPUT; op_output_idx[output_offset+i] = numOutputs; numOutputs += 1; } assert(op->numInputs > 0); assert(op->numWeights >= 0); assert(op->numOutputs > 0); op_num_inputs[numOperators] = op->numInputs; op_num_weights[numOperators] = op->numWeights; op_num_outputs[numOperators] = op->numOutputs; op_op_type[numOperators] = op->op_type; operators[numOperators] = op; numOperators += 1; assert(numOperators <= MAX_NUM_FUSED_OPERATORS); if (numInputs > MAX_NUM_INPUTS) { fprintf(stderr, "Reach to the #inputs limit during fusion.\n" "Consider increase MAX_NUM_INPUTS to allow more fusions.\n"); return false; } if (numWeights > MAX_NUM_WEIGHTS) { fprintf(stderr, "Reach to the #weights limit during fusion.\n" "Consider increase MAX_NUM_WEIGHTS to allow more fusions.\n"); return false; } if (numOutputs > MAX_NUM_OUTPUTS) { fprintf(stderr, "Reach to the #outputs limit during fusion.\n" "Consider increase MAX_NUM_OUTPUTS to allow more fusions.\n"); } return true; } void FusedOp::create_weights(FFModel& model) { assert(false && "Weights should be created before fusion optimizations"); } void FusedOp::create_output_and_partition(FFModel& model) { assert(false && "Outputs should be created before fusion optimizations"); } OpMeta* FusedOp::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { const FusedOp* fused = (FusedOp*) task->args; const FusedOpMeta* metas = (FusedOpMeta*) task->local_args; FusedOpMeta* local_meta = new FusedOpMeta(); memcpy(local_meta, metas, sizeof(FusedOpMeta)); local_meta->fused_op = (FusedOp*) malloc(sizeof(FusedOp)); memcpy(local_meta->fused_op, fused, sizeof(FusedOp)); return ((OpMeta*)local_meta); } void 
FusedOp::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; // Call init methods in individual operators Domain domain = runtime->get_index_space_domain(ctx, task_is); for (int i = 0; i < numOperators; i++) { operators[i]->init(ff); for (size_t j = 0; j < domain.get_volume(); j++) fused_meta[j].meta[i] = operators[i]->meta[j]; } for (size_t j = 0; j < domain.get_volume(); j++) fused_meta[j].numOperators = numOperators; switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ argmap.set_point(*it, TaskArgument(&fused_meta[idx++], sizeof(FusedOpMeta))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(FUSEDOP_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(FusedOp)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ meta[idx++] = fm.get_result<OpMeta*>(*it); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } /* regions[...](I): inputs regions[...](I): weights regions[...](I): outputs */ __host__ void FusedOp::forward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { //const FusedOp* fused = (FusedOp*) task->args; const FusedOpMeta* metas = *((FusedOpMeta**) task->local_args); const FusedOp* fused = metas->fused_op; assert(metas->numOperators == fused->numOperators); assert(regions.size() == task->regions.size()); assert((int)regions.size() == fused->numInputs+fused->numWeights+fused->numOutputs); Domain input_domain[MAX_NUM_INPUTS]; Domain weight_domain[MAX_NUM_WEIGHTS]; Domain output_domain[MAX_NUM_OUTPUTS]; const float* input_ptr[MAX_NUM_INPUTS]; const float* weight_ptr[MAX_NUM_WEIGHTS]; float* output_ptr[MAX_NUM_OUTPUTS]; assert(fused->numInputs <= MAX_NUM_INPUTS); for (int i = 0; i < fused->numInputs; i++) { input_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i].region.get_index_space()); input_ptr[i] = helperGetTensorPointerRO<float>( regions[i], task->regions[i], FID_DATA, ctx, runtime); } int roff = fused->numInputs; assert(fused->numWeights <= MAX_NUM_WEIGHTS); for (int i = 0; i < fused->numWeights; i++) { weight_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); weight_ptr[i] = helperGetTensorPointerRO<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); } roff += fused->numWeights; assert(fused->numOutputs <= MAX_NUM_OUTPUTS); for (int i = 0; i < fused->numOutputs; i++) { output_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); output_ptr[i] = helperGetTensorPointerWO<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); } // Assert that all meta share the same dnn/blas handler int start = 0; for (start = 0; start < fused->numOperators; start++) if (metas->meta[start] != NULL) break; for (int op = start+1; op < fused->numOperators; op++) if (metas->meta[op] != NULL) { assert(metas->meta[start]->handle.blas == metas->meta[op]->handle.blas); assert(metas->meta[start]->handle.dnn == metas->meta[op]->handle.dnn); } #ifndef 
DISABLE_LEGION_CUDA_HIJACK if (start < fused->numOperators) { hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDA(hipblasSetStream(metas->meta[start]->handle.blas, stream)); checkCUDNN(cudnnSetStream(metas->meta[start]->handle.dnn, stream)); } #endif int ioff = 0, woff = 0, ooff = 0; for (int op = 0; op < fused->numOperators; op++) { Domain my_id[MAX_NUM_INPUTS]; Domain my_wd[MAX_NUM_WEIGHTS]; Domain my_od[MAX_NUM_OUTPUTS]; const float* my_ip[MAX_NUM_INPUTS]; const float* my_wp[MAX_NUM_WEIGHTS]; float* my_op[MAX_NUM_OUTPUTS]; for (int i = 0; i < fused->op_num_inputs[op]; i++) { int my_off = fused->op_input_idx[i+ioff]; if (fused->op_input_source[i+ioff] == SOURCE_INPUT) { my_id[i] = input_domain[my_off]; my_ip[i] = input_ptr[my_off]; } else if (fused->op_input_source[i+ioff] == SOURCE_OUTPUT) { my_id[i] = output_domain[my_off]; my_ip[i] = output_ptr[my_off]; } else assert(false); } for (int i = 0; i < fused->op_num_weights[op]; i++) { assert(fused->op_weight_source[i+woff] == SOURCE_WEIGHT); my_wd[i] = weight_domain[fused->op_weight_idx[i+woff]]; my_wp[i] = weight_ptr[fused->op_weight_idx[i+woff]]; } for (int i = 0; i < fused->op_num_outputs[op]; i++) { assert(fused->op_output_source[i+ooff] == SOURCE_OUTPUT); my_od[i] = output_domain[fused->op_output_idx[i+ooff]]; my_op[i] = output_ptr[fused->op_output_idx[i+ooff]]; } switch(fused->op_op_type[op]) { case OP_CONCAT: { assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); ConcatMeta* m = (ConcatMeta*) metas->meta[op]; int num_inputs = fused->op_num_inputs[op]; Concat::forward_kernel(my_op[0], my_ip, num_inputs, m->axis, my_od[0], my_id); break; } case OP_CONV2D: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_dim() == 4); assert(my_wd[0].get_dim() == 4); assert(my_od[0].get_dim() == 4); Conv2DMeta* m = (Conv2DMeta*) metas->meta[op]; Conv2D::forward_kernel(m, my_ip[0], my_op[0], my_wp[0], my_wp[1]); break; } case OP_BATCHNORM: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_dim() == 4); assert(my_od[0].get_dim() == 4); assert(my_wd[0].get_dim() == 1); assert(my_wd[1].get_dim() == 1); BatchNormMeta* m = (BatchNormMeta*) metas->meta[op]; BatchNorm::forward_kernel(m, my_ip[0], my_op[0], my_wp[0], my_wp[1]); break; } case OP_DROPOUT: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); DropoutMeta* m = (DropoutMeta*) metas->meta[op]; Dropout::forward_kernel(m, my_ip[0], my_op[0]); break; } case OP_LINEAR: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 2); assert(fused->op_num_outputs[op] == 1); Rect<2> kernel_rect = my_wd[0]; int in_dim = kernel_rect.hi[0] - kernel_rect.lo[0] + 1; int out_dim = kernel_rect.hi[1] - kernel_rect.lo[1] + 1; int batch_size = my_id[0].get_volume() / in_dim; assert(my_od[0].get_volume() == out_dim * batch_size); assert(my_id[0].get_volume() == in_dim * batch_size); assert(my_wd[1].get_volume() == out_dim); LinearMeta* m = (LinearMeta*) metas->meta[op]; Linear::forward_kernel(m, my_ip[0], my_op[0], my_wp[0], my_wp[1], in_dim, out_dim, batch_size); break; } case OP_BATCHMATMUL: { assert(fused->op_num_inputs[op] == 2); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); Domain out_domain = my_od[0]; Domain a_domain = my_id[0]; Domain b_domain = my_id[1]; int m = b_domain.hi()[0] - b_domain.lo()[0] + 1; assert(m == out_domain.hi()[0] - out_domain.lo()[0] + 1); int n = a_domain.hi()[1] - 
a_domain.lo()[1] + 1; assert(n == out_domain.hi()[1] - out_domain.lo()[1] + 1); int k = a_domain.hi()[0] - a_domain.lo()[0] + 1; assert(k == b_domain.hi()[1] - b_domain.lo()[1] + 1); assert(a_domain.get_dim() == b_domain.get_dim()); assert(a_domain.get_dim() == out_domain.get_dim()); int batch = 1; for (int i = 2; i < a_domain.get_dim(); i++) { int dim_size = a_domain.hi()[i] - a_domain.lo()[i] + 1; assert(dim_size == b_domain.hi()[i] - b_domain.lo()[i] + 1); assert(dim_size == out_domain.hi()[i] - out_domain.lo()[i] + 1); batch *= dim_size; } BatchMatmulMeta* meta = (BatchMatmulMeta*) metas->meta[op]; BatchMatmul::forward_kernel(meta, my_op[0], my_ip[0], my_ip[1], NULL, m, n, k, batch, meta->a_seq_length_dim, meta->b_seq_length_dim, fused->iter_config.seq_length); break; } case OP_EW_ADD: case OP_EW_SUB: case OP_EW_MUL: case OP_EW_DIV: { assert(fused->op_num_inputs[op] == 2); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0] == my_id[1]); assert(my_id[0] == my_od[0]); ElementBinaryMeta* m = (ElementBinaryMeta*) metas->meta[op]; ElementBinary::forward_kernel(m, my_ip[0], my_ip[1], my_op[0]); break; } case OP_RELU: case OP_SIGMOID: case OP_TANH: case OP_ELU: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0] == my_od[0]); ElementUnaryMeta* m = (ElementUnaryMeta*) metas->meta[op]; ElementUnary::forward_kernel(m, my_ip[0], my_op[0], my_id[0].get_volume()); break; } case OP_POOL2D: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); //assert(my_id[0] == my_od[0]); Pool2DMeta* m = (Pool2DMeta*) metas->meta[op]; Pool2D::forward_kernel(m, my_ip[0], my_op[0]); break; } case OP_FLAT: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_volume() == my_od[0].get_volume()); Flat::forward_kernel(my_ip[0], my_op[0], my_id[0].get_volume()); break; } case OP_RESHAPE: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_volume() == my_od[0].get_volume()); Reshape::forward_kernel(my_ip[0], my_op[0], my_id[0].get_volume()); break; } case OP_TRANSPOSE: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_volume() == my_od[0].get_volume()); TransposeMeta* m = (TransposeMeta*) metas->meta[op]; Transpose::forward_kernel(m, my_ip[0], my_op[0], my_id[0], my_od[0]); break; } default: { fprintf(stderr, "Fusion currently does not support type = %d\n", fused->op_op_type[op]); assert(false && "Fusion currently does not support type"); } } ioff += fused->op_num_inputs[op]; woff += fused->op_num_weights[op]; ooff += fused->op_num_outputs[op]; } //for (int i = 0; i < fused->numOutputs; i++) // print_tensor<float>(output_ptr[i], output_domain[i].get_volume(), "[Fused:forward:output]"); } void FusedOp::forward(const FFModel& ff) { // Set iter_config iter_config = ff.iter_config; ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, 
sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(FUSEDOP_FWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); int offset = 0; for (int i = 0; i < numInputs; i++) { assert(input_lps[i] != LogicalPartition::NO_PART); assert(inputs[i].region != LogicalRegion::NO_REGION); launcher.add_region_requirement( RegionRequirement(input_lps[i], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[i].region)); launcher.add_field(offset+i, FID_DATA); } offset += numInputs; for (int i = 0; i < numWeights; i++) { assert(weights[i].region != LogicalRegion::NO_REGION); launcher.add_region_requirement( RegionRequirement(weights[i].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[i].region)); launcher.add_field(offset+i, FID_DATA); } offset += numWeights; for (int i = 0; i < numOutputs; i++) { assert(outputs[i].region != LogicalRegion::NO_REGION); launcher.add_region_requirement( RegionRequirement(outputs[i].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[i].region)); launcher.add_field(offset+i, FID_DATA); } runtime->execute_index_space(ctx, launcher); } /* regions[...](I): input regions[...](I): weight regions[...](I): output regions[...](I/O): input_grad regions[...](I/O): weight_grad regions[...](I/O): output_grad */ __host__ void FusedOp::backward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { // const FusedOp* fused = (FusedOp*) task->args; const FusedOpMeta* metas = *((FusedOpMeta**) task->local_args); const FusedOp* fused = metas->fused_op; assert(metas->numOperators == fused->numOperators); assert(regions.size() == task->regions.size()); { int sum = fused->numInputs + fused->numWeights + fused->numOutputs; assert(sum*2 == (int)regions.size()); } Domain input_domain[MAX_NUM_INPUTS], input_grad_domain[MAX_NUM_INPUTS]; Domain weight_domain[MAX_NUM_WEIGHTS], weight_grad_domain[MAX_NUM_WEIGHTS]; Domain output_domain[MAX_NUM_OUTPUTS], output_grad_domain[MAX_NUM_OUTPUTS]; const float* input_ptr[MAX_NUM_INPUTS]; float* input_grad_ptr[MAX_NUM_INPUTS]; const float* weight_ptr[MAX_NUM_WEIGHTS]; float* weight_grad_ptr[MAX_NUM_WEIGHTS]; const float* output_ptr[MAX_NUM_OUTPUTS]; float* output_grad_ptr[MAX_NUM_OUTPUTS]; int roff = 0; assert(fused->numInputs <= MAX_NUM_INPUTS); for (int i = 0; i < fused->numInputs; i++) { input_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i].region.get_index_space()); input_ptr[i] = helperGetTensorPointerRO<float>( regions[i], task->regions[i], FID_DATA, ctx, runtime); } roff += fused->numInputs; assert(fused->numWeights <= MAX_NUM_WEIGHTS); for (int i = 0; i < fused->numWeights; i++) { weight_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); weight_ptr[i] = helperGetTensorPointerRO<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); } roff += fused->numWeights; assert(fused->numOutputs <= MAX_NUM_OUTPUTS); for (int i = 0; i < fused->numOutputs; i++) { output_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); output_ptr[i] = helperGetTensorPointerRO<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); } roff += fused->numOutputs; for (int i = 0; i < fused->numInputs; i++) { input_grad_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); 
input_grad_ptr[i] = helperGetTensorPointerRW<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); assert(input_grad_domain[i] == input_domain[i]); } roff += fused->numInputs; for (int i = 0; i < fused->numWeights; i++) { weight_grad_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); weight_grad_ptr[i] = helperGetTensorPointerRW<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); assert(weight_grad_domain[i].get_volume() == weight_domain[i].get_volume()); } roff += fused->numWeights; for (int i = 0; i < fused->numOutputs; i++) { output_grad_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); output_grad_ptr[i] = helperGetTensorPointerRW<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); assert(output_grad_domain[i] == output_domain[i]); } roff += fused->numOutputs; // Assert that all meta share the same dnn/blas handler int start = 0; for (start = 0; start < fused->numOperators; start++) if (metas->meta[start] != NULL) break; for (int op = start+1; op < fused->numOperators; op++) if (metas->meta[op] != NULL) { assert(metas->meta[start]->handle.blas == metas->meta[op]->handle.blas); assert(metas->meta[start]->handle.dnn == metas->meta[op]->handle.dnn); } #ifndef DISABLE_LEGION_CUDA_HIJACK if (start < fused->numOperators) { hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDA(hipblasSetStream(metas->meta[start]->handle.blas, stream)); checkCUDNN(cudnnSetStream(metas->meta[start]->handle.dnn, stream)); } #endif int ioff = 0, woff = 0, ooff = 0; Domain my_id[MAX_NUM_INPUTS], my_grad_id[MAX_NUM_INPUTS]; Domain my_wd[MAX_NUM_WEIGHTS], my_grad_wd[MAX_NUM_WEIGHTS]; Domain my_od[MAX_NUM_OUTPUTS], my_grad_od[MAX_NUM_OUTPUTS]; const float* my_ip[MAX_NUM_INPUTS]; const float* my_wp[MAX_NUM_WEIGHTS]; const float* my_op[MAX_NUM_OUTPUTS]; float* my_grad_ip[MAX_NUM_INPUTS]; float* my_grad_wp[MAX_NUM_WEIGHTS]; float* my_grad_op[MAX_NUM_OUTPUTS]; // Do backpropagation in the reverse ordering for (int op = 0; op < fused->numOperators; op++) { ioff += fused->op_num_inputs[op]; woff += fused->op_num_weights[op]; ooff += fused->op_num_outputs[op]; } for (int op = fused->numOperators-1; op >= 0; op--) { ioff -= fused->op_num_inputs[op]; woff -= fused->op_num_weights[op]; ooff -= fused->op_num_outputs[op]; for (int i = 0; i < fused->op_num_inputs[op]; i++) { int my_off = fused->op_input_idx[i+ioff]; if (fused->op_input_source[i+ioff] == SOURCE_INPUT) { my_id[i] = input_domain[my_off]; my_ip[i] = input_ptr[my_off]; my_grad_id[i] = input_grad_domain[my_off]; my_grad_ip[i] = input_grad_ptr[my_off]; assert(my_grad_id[i] == my_id[i]); } else if (fused->op_input_source[i+ioff] == SOURCE_OUTPUT) { my_id[i] = output_domain[my_off]; my_ip[i] = output_ptr[my_off]; my_grad_id[i] = output_grad_domain[my_off]; my_grad_ip[i] = output_grad_ptr[my_off]; assert(my_grad_id[i] == my_id[i]); } else assert(false); } for (int i = 0; i < fused->op_num_weights[op]; i++) { assert(fused->op_weight_source[i+woff] == SOURCE_WEIGHT); my_wd[i] = weight_domain[fused->op_weight_idx[i+woff]]; my_wp[i] = weight_ptr[fused->op_weight_idx[i+woff]]; my_grad_wd[i] = weight_grad_domain[fused->op_weight_idx[i+woff]]; my_grad_wp[i] = weight_grad_ptr[fused->op_weight_idx[i+woff]]; assert(my_grad_wd[i].get_volume() == my_wd[i].get_volume()); } for (int i = 0; i < fused->op_num_outputs[op]; i++) { assert(fused->op_output_source[i+ooff] == SOURCE_OUTPUT); my_od[i] = 
output_domain[fused->op_output_idx[i+ooff]]; my_op[i] = output_ptr[fused->op_output_idx[i+ooff]]; my_grad_od[i] = output_grad_domain[fused->op_output_idx[i+ooff]]; my_grad_op[i] = output_grad_ptr[fused->op_output_idx[i+ooff]]; assert(my_grad_od[i] == my_od[i]); } switch (fused->op_op_type[op]) { case OP_CONCAT: { assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); ConcatMeta* m = (ConcatMeta*) metas->meta[op]; int num_inputs = fused->op_num_inputs[op]; Concat::backward_kernel(my_grad_op[0], my_grad_ip, num_inputs, m->axis, my_grad_od[0], my_grad_id); break; } case OP_CONV2D: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_dim() == 4); assert(my_wd[0].get_dim() == 4); assert(my_od[0].get_dim() == 4); Conv2DMeta* m = (Conv2DMeta*) metas->meta[op]; Conv2D::backward_kernel(m, my_ip[0], my_grad_ip[0], my_op[0], my_grad_op[0], my_wp[0], my_grad_wp[0], my_grad_wp[1]); break; } case OP_BATCHNORM: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_dim() == 4); assert(my_wd[0].get_dim() == 1); assert(my_wd[1].get_dim() == 1); assert(my_od[0].get_dim() == 4); BatchNormMeta* m = (BatchNormMeta*) metas->meta[op]; BatchNorm::backward_kernel(m, my_ip[0], my_grad_op[0], my_op[0], my_grad_ip[0], my_wp[0], my_grad_wp[0], my_grad_wp[1], my_od[0].get_volume()); break; } case OP_DROPOUT: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); DropoutMeta* m = (DropoutMeta*) metas->meta[op]; Dropout::backward_kernel(m, my_grad_op[0], my_grad_ip[0]); break; } case OP_LINEAR: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 2); assert(fused->op_num_outputs[op] == 1); Rect<2> kernel_rect = my_wd[0]; int in_dim = kernel_rect.hi[0] - kernel_rect.lo[0] + 1; int out_dim = kernel_rect.hi[1] - kernel_rect.lo[1] + 1; int batch_size = my_id[0].get_volume() / in_dim; assert(my_od[0].get_volume() == out_dim * batch_size); assert(my_id[0].get_volume() == in_dim * batch_size); assert(my_wd[1].get_volume() == out_dim); LinearMeta* m = (LinearMeta*) metas->meta[op]; Linear::backward_kernel(m, my_ip[0], my_grad_ip[0], my_op[0], my_grad_op[0], my_wp[0], my_grad_wp[0], my_grad_wp[1], in_dim, out_dim, batch_size); break; } case OP_BATCHMATMUL: { assert(fused->op_num_inputs[op] == 2); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); Domain out_domain = my_od[0]; Domain a_domain = my_id[0]; Domain b_domain = my_id[1]; // check dims int m = b_domain.hi()[0] - b_domain.lo()[0] + 1; assert(m == out_domain.hi()[0] - out_domain.lo()[0] + 1); int n = a_domain.hi()[1] - a_domain.lo()[1] + 1; assert(n == out_domain.hi()[1] - out_domain.lo()[1] + 1); int k = a_domain.hi()[0] - a_domain.lo()[0] + 1; assert(k == b_domain.hi()[1] - b_domain.lo()[1] + 1); assert(a_domain.get_dim() == b_domain.get_dim()); assert(a_domain.get_dim() == out_domain.get_dim()); int batch = 1; for (int i = 2; i < a_domain.get_dim(); i++) { int dim_size = a_domain.hi()[i] - a_domain.lo()[i] + 1; assert(dim_size == b_domain.hi()[i] - b_domain.lo()[i] + 1); assert(dim_size == out_domain.hi()[i] - out_domain.lo()[i] + 1); batch *= dim_size; } BatchMatmulMeta* meta = (BatchMatmulMeta*) metas->meta[op]; BatchMatmul::backward_kernel(meta, my_op[0], my_grad_op[0], my_ip[0], my_grad_ip[0], my_ip[1], my_grad_ip[1], NULL, m, n, k, batch); break; } case OP_EW_ADD: case OP_EW_SUB: case OP_EW_MUL: case OP_EW_DIV: { assert(fused->op_num_inputs[op] == 2); 
assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0] == my_id[1]); assert(my_id[0] == my_od[0]); ElementBinaryMeta* m = (ElementBinaryMeta*) metas->meta[op]; ElementBinary::backward_kernel(m, my_grad_op[0], my_ip[0], my_ip[1], my_grad_ip[0], my_grad_ip[1]); break; } case OP_RELU: case OP_SIGMOID: case OP_TANH: case OP_ELU: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0] == my_od[0]); ElementUnaryMeta* m = (ElementUnaryMeta*) metas->meta[op]; ElementUnary::backward_kernel(m, my_ip[0], my_grad_ip[0], my_op[0], my_grad_op[0], my_id[0].get_volume()); break; } case OP_POOL2D: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); //assert(my_id[0] == my_od[0]); Pool2DMeta* m = (Pool2DMeta*) metas->meta[op]; Pool2D::backward_kernel(m, my_ip[0], my_grad_ip[0], my_op[0], my_grad_op[0]); break; } case OP_FLAT: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_grad_id[0].get_volume() == my_grad_od[0].get_volume()); Flat::backward_kernel(my_grad_ip[0], my_grad_op[0], my_grad_id[0].get_volume()); break; } case OP_RESHAPE: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_grad_id[0].get_volume() == my_grad_od[0].get_volume()); Reshape::backward_kernel(my_grad_ip[0], my_grad_op[0], my_grad_id[0].get_volume()); break; } case OP_TRANSPOSE: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_grad_id[0].get_volume() == my_grad_od[0].get_volume()); TransposeMeta* m = (TransposeMeta*) metas->meta[op]; Transpose::backward_kernel(m, my_grad_ip[0], my_grad_op[0], my_grad_id[0], my_grad_od[0]); break; } default: assert(false && "Fusion currently does not support type"); } } assert(ioff == 0); assert(woff == 0); assert(ooff == 0); //for (int i = 0; i < fused->numWeights; i++) // print_tensor<float>(weight_grad_ptr[i], weight_grad_domain[i].get_volume(), "[Fused:backward:weight_grad]"); //for (int i = 0; i < fused->numInputs; i++) // print_tensor<float>(input_grad_ptr[i], input_grad_domain[i].get_volume(), "[Fused:backward:input_grad]"); //for (int i = 0; i < fused->numOutputs; i++) // print_tensor<float>(output_grad_ptr[i], output_grad_domain[i].get_volume(), "[Fused:backward:output_grad]"); } void FusedOp::backward(const FFModel& ff) { // Set iter_config iter_config = ff.iter_config; ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(FUSEDOP_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(FusedOp)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); int idx = 0; for (int i = 0; i < numInputs; i++) { launcher.add_region_requirement( RegionRequirement(input_lps[i], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[i].region)); launcher.add_field(idx++, FID_DATA); } for (int i = 0; i 
< numWeights; i++) { launcher.add_region_requirement( RegionRequirement(weights[i].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[i].region)); launcher.add_field(idx++, FID_DATA); } for (int i = 0; i < numOutputs; i++) { launcher.add_region_requirement( RegionRequirement(outputs[i].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[i].region)); launcher.add_field(idx++, FID_DATA); } for (int i = 0; i < numInputs; i++) { launcher.add_region_requirement( RegionRequirement(input_grad_lps[i], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[i].region_grad)); launcher.add_field(idx++, FID_DATA); } for (int i = 0; i < numWeights; i++) { launcher.add_region_requirement( RegionRequirement(weights[i].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[i].region_grad)); launcher.add_field(idx++, FID_DATA); } for (int i = 0; i < numOutputs; i++) { launcher.add_region_requirement( RegionRequirement(outputs[i].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[i].region_grad)); launcher.add_field(idx++, FID_DATA); } runtime->execute_index_space(ctx, launcher); } bool FusedOp::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { // The search should happen before fusion assert(false); return false; }
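To make the SOURCE_INPUT/SOURCE_OUTPUT bookkeeping in FusedOp::forward_task above easier to follow, here is a stripped-down sketch of how each fused operator resolves its inputs either from the fused op's own inputs or from an earlier operator's outputs; the concrete arrays and values are illustrative assumptions, not FlexFlow code:

#include <cstdio>

enum Source { SOURCE_INPUT, SOURCE_OUTPUT };

int main() {
  // Fused-op level tensors (assumed values).
  const float fused_inputs[2]  = {1.0f, 2.0f};   // external inputs to the fused op
  const float fused_outputs[2] = {10.0f, 0.0f};  // outputs; slot 0 written by operator 0
  // Operator 1 consumes fused input #1 and operator 0's output #0.
  const Source op_input_source[2] = {SOURCE_INPUT, SOURCE_OUTPUT};
  const int    op_input_idx[2]    = {1, 0};
  for (int i = 0; i < 2; i++) {
    const float v = (op_input_source[i] == SOURCE_INPUT)
                        ? fused_inputs[op_input_idx[i]]
                        : fused_outputs[op_input_idx[i]];
    std::printf("operator 1, input %d = %.1f\n", i, v);
  }
  return 0;
}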
7172781d2e8fd5185da835de94c0d3544b382d6f.cu
/* Copyright 2020 Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" FusedOp::FusedOp(FFModel& model, Op* op) : Op(model, OP_FUSED, op->name, 0) { numInputs = op->numInputs; for (int i = 0; i < numInputs; i++) { inputs[i] = op->inputs[i]; input_lps[i] = op->input_lps[i]; input_grad_lps[i] = op->input_grad_lps[i]; } numWeights = op->numWeights; for (int i = 0; i < numWeights; i++) { weights[i] = op->weights[i]; weights[i].owner_op = this; weights[i].owner_idx = i; } numOutputs = op->numOutputs; for (int i = 0; i < numOutputs; i++) { outputs[i] = op->outputs[i]; outputs[i].owner_op = this; outputs[i].owner_idx = i; } numOperators = 1; op_num_inputs[0] = numInputs; op_num_weights[0] = numWeights; op_num_outputs[0] = numOutputs; op_op_type[0] = op->op_type; operators[0] = op; for (int i = 0; i < numInputs; i++) { op_input_source[i] = SOURCE_INPUT; op_input_idx[i] = i; } for (int i = 0; i < numWeights; i++) { op_weight_source[i] = SOURCE_WEIGHT; op_weight_idx[i] = i; } for (int i = 0; i < numOutputs; i++) { op_output_source[i] = SOURCE_OUTPUT; op_output_idx[i] = i; } task_is = op->task_is; } bool FusedOp::add_operator(FFModel& model, Op* op) { Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; // Currently assume fusion optimization is performed // after create_weights and create_outputs // So task_is and op->task_is are not empty Domain my_domain = runtime->get_index_space_domain(ctx, task_is); Domain op_domain = runtime->get_index_space_domain(ctx, op->task_is); ParallelConfig my_config, op_config; assert(model.config.find_parallel_config(my_domain.get_dim(), name, my_config)); assert(model.config.find_parallel_config(op_domain.get_dim(), op->name, op_config)); if (my_config == op_config) { // Do nothing } else { return false; } int input_offset = 0, weight_offset = 0, output_offset = 0; for (int i = 0; i < numOperators; i++) { input_offset += op_num_inputs[i]; weight_offset += op_num_weights[i]; output_offset += op_num_outputs[i]; } if ((input_offset + op->numInputs > MAX_NUM_FUSED_TENSORS) || (weight_offset + op->numWeights > MAX_NUM_FUSED_TENSORS) || (output_offset + op->numOutputs > MAX_NUM_FUSED_TENSORS)) { fprintf(stderr, "Cannot fuse. Consider increase MAX_NUM_FUSED_TENSORS\n"); return false; } if (numOperators + 1 > MAX_NUM_FUSED_OPERATORS) { fprintf(stderr, "Reach to the fusion limit. 
Consider increase MAX_NUM_FUSED_OPERATORS"); return false; } // Set inputs for (int i = 0; i < op->numInputs; i++) { bool found = false; for (int j = 0; j < input_offset; j++) if (inputs[j].region == op->inputs[i].region) { // This input is one of my inputs assert(!found); assert(inputs[j].region != LogicalRegion::NO_REGION); op_input_source[input_offset + i] = SOURCE_INPUT; op_input_idx[input_offset + i] = j; found = true; break; } for (int j = 0; j < output_offset; j++) if (outputs[j].region == op->inputs[i].region) { // This input is one of my outputs assert(!found); assert(outputs[j].region != LogicalRegion::NO_REGION); op_input_source[input_offset + i] = SOURCE_OUTPUT; op_input_idx[input_offset + i] = j; found = true; break; } if (found) { // Do nothing } else { inputs[numInputs] = op->inputs[i]; input_lps[numInputs] = op->input_lps[i]; input_grad_lps[numInputs] = op->input_grad_lps[i]; op_input_source[input_offset+i] = SOURCE_INPUT; op_input_idx[input_offset+i] = numInputs; numInputs += 1; } } // Set weights for (int i = 0; i < op->numWeights; i++) { bool found = false; for (int j = 0; j < numWeights; j++) if (weights[j].region == op->weights[i].region) { assert(!found); assert(weights[j].region != LogicalRegion::NO_REGION); op_weight_source[weight_offset + i] = SOURCE_WEIGHT; op_weight_idx[weight_offset + i] = j; found = true; break; } if (found) { // Do nothing } else { weights[numWeights] = op->weights[i]; weights[numWeights].owner_op = this; weights[numWeights].owner_idx = numWeights; op_weight_source[weight_offset+i] = SOURCE_WEIGHT; op_weight_idx[weight_offset+i] = numWeights; numWeights += 1; } } // Set outputs for (int i = 0; i < op->numOutputs; i++) { outputs[numOutputs] = op->outputs[i]; outputs[numOutputs].owner_op = this; outputs[numOutputs].owner_idx = numOutputs; op_output_source[output_offset+i] = SOURCE_OUTPUT; op_output_idx[output_offset+i] = numOutputs; numOutputs += 1; } assert(op->numInputs > 0); assert(op->numWeights >= 0); assert(op->numOutputs > 0); op_num_inputs[numOperators] = op->numInputs; op_num_weights[numOperators] = op->numWeights; op_num_outputs[numOperators] = op->numOutputs; op_op_type[numOperators] = op->op_type; operators[numOperators] = op; numOperators += 1; assert(numOperators <= MAX_NUM_FUSED_OPERATORS); if (numInputs > MAX_NUM_INPUTS) { fprintf(stderr, "Reach to the #inputs limit during fusion.\n" "Consider increase MAX_NUM_INPUTS to allow more fusions.\n"); return false; } if (numWeights > MAX_NUM_WEIGHTS) { fprintf(stderr, "Reach to the #weights limit during fusion.\n" "Consider increase MAX_NUM_WEIGHTS to allow more fusions.\n"); return false; } if (numOutputs > MAX_NUM_OUTPUTS) { fprintf(stderr, "Reach to the #outputs limit during fusion.\n" "Consider increase MAX_NUM_OUTPUTS to allow more fusions.\n"); } return true; } void FusedOp::create_weights(FFModel& model) { assert(false && "Weights should be created before fusion optimizations"); } void FusedOp::create_output_and_partition(FFModel& model) { assert(false && "Outputs should be created before fusion optimizations"); } OpMeta* FusedOp::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { const FusedOp* fused = (FusedOp*) task->args; const FusedOpMeta* metas = (FusedOpMeta*) task->local_args; FusedOpMeta* local_meta = new FusedOpMeta(); memcpy(local_meta, metas, sizeof(FusedOpMeta)); local_meta->fused_op = (FusedOp*) malloc(sizeof(FusedOp)); memcpy(local_meta->fused_op, fused, sizeof(FusedOp)); return ((OpMeta*)local_meta); } void 
FusedOp::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; // Call init methods in individual operators Domain domain = runtime->get_index_space_domain(ctx, task_is); for (int i = 0; i < numOperators; i++) { operators[i]->init(ff); for (size_t j = 0; j < domain.get_volume(); j++) fused_meta[j].meta[i] = operators[i]->meta[j]; } for (size_t j = 0; j < domain.get_volume(); j++) fused_meta[j].numOperators = numOperators; switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ argmap.set_point(*it, TaskArgument(&fused_meta[idx++], sizeof(FusedOpMeta))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(FUSEDOP_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(FusedOp)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ meta[idx++] = fm.get_result<OpMeta*>(*it); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } /* regions[...](I): inputs regions[...](I): weights regions[...](I): outputs */ __host__ void FusedOp::forward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { //const FusedOp* fused = (FusedOp*) task->args; const FusedOpMeta* metas = *((FusedOpMeta**) task->local_args); const FusedOp* fused = metas->fused_op; assert(metas->numOperators == fused->numOperators); assert(regions.size() == task->regions.size()); assert((int)regions.size() == fused->numInputs+fused->numWeights+fused->numOutputs); Domain input_domain[MAX_NUM_INPUTS]; Domain weight_domain[MAX_NUM_WEIGHTS]; Domain output_domain[MAX_NUM_OUTPUTS]; const float* input_ptr[MAX_NUM_INPUTS]; const float* weight_ptr[MAX_NUM_WEIGHTS]; float* output_ptr[MAX_NUM_OUTPUTS]; assert(fused->numInputs <= MAX_NUM_INPUTS); for (int i = 0; i < fused->numInputs; i++) { input_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i].region.get_index_space()); input_ptr[i] = helperGetTensorPointerRO<float>( regions[i], task->regions[i], FID_DATA, ctx, runtime); } int roff = fused->numInputs; assert(fused->numWeights <= MAX_NUM_WEIGHTS); for (int i = 0; i < fused->numWeights; i++) { weight_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); weight_ptr[i] = helperGetTensorPointerRO<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); } roff += fused->numWeights; assert(fused->numOutputs <= MAX_NUM_OUTPUTS); for (int i = 0; i < fused->numOutputs; i++) { output_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); output_ptr[i] = helperGetTensorPointerWO<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); } // Assert that all meta share the same dnn/blas handler int start = 0; for (start = 0; start < fused->numOperators; start++) if (metas->meta[start] != NULL) break; for (int op = start+1; op < fused->numOperators; op++) if (metas->meta[op] != NULL) { assert(metas->meta[start]->handle.blas == metas->meta[op]->handle.blas); assert(metas->meta[start]->handle.dnn == metas->meta[op]->handle.dnn); } #ifndef 
DISABLE_LEGION_CUDA_HIJACK if (start < fused->numOperators) { cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDA(cublasSetStream(metas->meta[start]->handle.blas, stream)); checkCUDNN(cudnnSetStream(metas->meta[start]->handle.dnn, stream)); } #endif int ioff = 0, woff = 0, ooff = 0; for (int op = 0; op < fused->numOperators; op++) { Domain my_id[MAX_NUM_INPUTS]; Domain my_wd[MAX_NUM_WEIGHTS]; Domain my_od[MAX_NUM_OUTPUTS]; const float* my_ip[MAX_NUM_INPUTS]; const float* my_wp[MAX_NUM_WEIGHTS]; float* my_op[MAX_NUM_OUTPUTS]; for (int i = 0; i < fused->op_num_inputs[op]; i++) { int my_off = fused->op_input_idx[i+ioff]; if (fused->op_input_source[i+ioff] == SOURCE_INPUT) { my_id[i] = input_domain[my_off]; my_ip[i] = input_ptr[my_off]; } else if (fused->op_input_source[i+ioff] == SOURCE_OUTPUT) { my_id[i] = output_domain[my_off]; my_ip[i] = output_ptr[my_off]; } else assert(false); } for (int i = 0; i < fused->op_num_weights[op]; i++) { assert(fused->op_weight_source[i+woff] == SOURCE_WEIGHT); my_wd[i] = weight_domain[fused->op_weight_idx[i+woff]]; my_wp[i] = weight_ptr[fused->op_weight_idx[i+woff]]; } for (int i = 0; i < fused->op_num_outputs[op]; i++) { assert(fused->op_output_source[i+ooff] == SOURCE_OUTPUT); my_od[i] = output_domain[fused->op_output_idx[i+ooff]]; my_op[i] = output_ptr[fused->op_output_idx[i+ooff]]; } switch(fused->op_op_type[op]) { case OP_CONCAT: { assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); ConcatMeta* m = (ConcatMeta*) metas->meta[op]; int num_inputs = fused->op_num_inputs[op]; Concat::forward_kernel(my_op[0], my_ip, num_inputs, m->axis, my_od[0], my_id); break; } case OP_CONV2D: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_dim() == 4); assert(my_wd[0].get_dim() == 4); assert(my_od[0].get_dim() == 4); Conv2DMeta* m = (Conv2DMeta*) metas->meta[op]; Conv2D::forward_kernel(m, my_ip[0], my_op[0], my_wp[0], my_wp[1]); break; } case OP_BATCHNORM: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_dim() == 4); assert(my_od[0].get_dim() == 4); assert(my_wd[0].get_dim() == 1); assert(my_wd[1].get_dim() == 1); BatchNormMeta* m = (BatchNormMeta*) metas->meta[op]; BatchNorm::forward_kernel(m, my_ip[0], my_op[0], my_wp[0], my_wp[1]); break; } case OP_DROPOUT: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); DropoutMeta* m = (DropoutMeta*) metas->meta[op]; Dropout::forward_kernel(m, my_ip[0], my_op[0]); break; } case OP_LINEAR: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 2); assert(fused->op_num_outputs[op] == 1); Rect<2> kernel_rect = my_wd[0]; int in_dim = kernel_rect.hi[0] - kernel_rect.lo[0] + 1; int out_dim = kernel_rect.hi[1] - kernel_rect.lo[1] + 1; int batch_size = my_id[0].get_volume() / in_dim; assert(my_od[0].get_volume() == out_dim * batch_size); assert(my_id[0].get_volume() == in_dim * batch_size); assert(my_wd[1].get_volume() == out_dim); LinearMeta* m = (LinearMeta*) metas->meta[op]; Linear::forward_kernel(m, my_ip[0], my_op[0], my_wp[0], my_wp[1], in_dim, out_dim, batch_size); break; } case OP_BATCHMATMUL: { assert(fused->op_num_inputs[op] == 2); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); Domain out_domain = my_od[0]; Domain a_domain = my_id[0]; Domain b_domain = my_id[1]; int m = b_domain.hi()[0] - b_domain.lo()[0] + 1; assert(m == out_domain.hi()[0] - out_domain.lo()[0] + 1); int n = a_domain.hi()[1] - 
a_domain.lo()[1] + 1; assert(n == out_domain.hi()[1] - out_domain.lo()[1] + 1); int k = a_domain.hi()[0] - a_domain.lo()[0] + 1; assert(k == b_domain.hi()[1] - b_domain.lo()[1] + 1); assert(a_domain.get_dim() == b_domain.get_dim()); assert(a_domain.get_dim() == out_domain.get_dim()); int batch = 1; for (int i = 2; i < a_domain.get_dim(); i++) { int dim_size = a_domain.hi()[i] - a_domain.lo()[i] + 1; assert(dim_size == b_domain.hi()[i] - b_domain.lo()[i] + 1); assert(dim_size == out_domain.hi()[i] - out_domain.lo()[i] + 1); batch *= dim_size; } BatchMatmulMeta* meta = (BatchMatmulMeta*) metas->meta[op]; BatchMatmul::forward_kernel(meta, my_op[0], my_ip[0], my_ip[1], NULL, m, n, k, batch, meta->a_seq_length_dim, meta->b_seq_length_dim, fused->iter_config.seq_length); break; } case OP_EW_ADD: case OP_EW_SUB: case OP_EW_MUL: case OP_EW_DIV: { assert(fused->op_num_inputs[op] == 2); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0] == my_id[1]); assert(my_id[0] == my_od[0]); ElementBinaryMeta* m = (ElementBinaryMeta*) metas->meta[op]; ElementBinary::forward_kernel(m, my_ip[0], my_ip[1], my_op[0]); break; } case OP_RELU: case OP_SIGMOID: case OP_TANH: case OP_ELU: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0] == my_od[0]); ElementUnaryMeta* m = (ElementUnaryMeta*) metas->meta[op]; ElementUnary::forward_kernel(m, my_ip[0], my_op[0], my_id[0].get_volume()); break; } case OP_POOL2D: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); //assert(my_id[0] == my_od[0]); Pool2DMeta* m = (Pool2DMeta*) metas->meta[op]; Pool2D::forward_kernel(m, my_ip[0], my_op[0]); break; } case OP_FLAT: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_volume() == my_od[0].get_volume()); Flat::forward_kernel(my_ip[0], my_op[0], my_id[0].get_volume()); break; } case OP_RESHAPE: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_volume() == my_od[0].get_volume()); Reshape::forward_kernel(my_ip[0], my_op[0], my_id[0].get_volume()); break; } case OP_TRANSPOSE: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_volume() == my_od[0].get_volume()); TransposeMeta* m = (TransposeMeta*) metas->meta[op]; Transpose::forward_kernel(m, my_ip[0], my_op[0], my_id[0], my_od[0]); break; } default: { fprintf(stderr, "Fusion currently does not support type = %d\n", fused->op_op_type[op]); assert(false && "Fusion currently does not support type"); } } ioff += fused->op_num_inputs[op]; woff += fused->op_num_weights[op]; ooff += fused->op_num_outputs[op]; } //for (int i = 0; i < fused->numOutputs; i++) // print_tensor<float>(output_ptr[i], output_domain[i].get_volume(), "[Fused:forward:output]"); } void FusedOp::forward(const FFModel& ff) { // Set iter_config iter_config = ff.iter_config; ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, 
sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(FUSEDOP_FWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); int offset = 0; for (int i = 0; i < numInputs; i++) { assert(input_lps[i] != LogicalPartition::NO_PART); assert(inputs[i].region != LogicalRegion::NO_REGION); launcher.add_region_requirement( RegionRequirement(input_lps[i], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[i].region)); launcher.add_field(offset+i, FID_DATA); } offset += numInputs; for (int i = 0; i < numWeights; i++) { assert(weights[i].region != LogicalRegion::NO_REGION); launcher.add_region_requirement( RegionRequirement(weights[i].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[i].region)); launcher.add_field(offset+i, FID_DATA); } offset += numWeights; for (int i = 0; i < numOutputs; i++) { assert(outputs[i].region != LogicalRegion::NO_REGION); launcher.add_region_requirement( RegionRequirement(outputs[i].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[i].region)); launcher.add_field(offset+i, FID_DATA); } runtime->execute_index_space(ctx, launcher); } /* regions[...](I): input regions[...](I): weight regions[...](I): output regions[...](I/O): input_grad regions[...](I/O): weight_grad regions[...](I/O): output_grad */ __host__ void FusedOp::backward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { // const FusedOp* fused = (FusedOp*) task->args; const FusedOpMeta* metas = *((FusedOpMeta**) task->local_args); const FusedOp* fused = metas->fused_op; assert(metas->numOperators == fused->numOperators); assert(regions.size() == task->regions.size()); { int sum = fused->numInputs + fused->numWeights + fused->numOutputs; assert(sum*2 == (int)regions.size()); } Domain input_domain[MAX_NUM_INPUTS], input_grad_domain[MAX_NUM_INPUTS]; Domain weight_domain[MAX_NUM_WEIGHTS], weight_grad_domain[MAX_NUM_WEIGHTS]; Domain output_domain[MAX_NUM_OUTPUTS], output_grad_domain[MAX_NUM_OUTPUTS]; const float* input_ptr[MAX_NUM_INPUTS]; float* input_grad_ptr[MAX_NUM_INPUTS]; const float* weight_ptr[MAX_NUM_WEIGHTS]; float* weight_grad_ptr[MAX_NUM_WEIGHTS]; const float* output_ptr[MAX_NUM_OUTPUTS]; float* output_grad_ptr[MAX_NUM_OUTPUTS]; int roff = 0; assert(fused->numInputs <= MAX_NUM_INPUTS); for (int i = 0; i < fused->numInputs; i++) { input_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i].region.get_index_space()); input_ptr[i] = helperGetTensorPointerRO<float>( regions[i], task->regions[i], FID_DATA, ctx, runtime); } roff += fused->numInputs; assert(fused->numWeights <= MAX_NUM_WEIGHTS); for (int i = 0; i < fused->numWeights; i++) { weight_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); weight_ptr[i] = helperGetTensorPointerRO<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); } roff += fused->numWeights; assert(fused->numOutputs <= MAX_NUM_OUTPUTS); for (int i = 0; i < fused->numOutputs; i++) { output_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); output_ptr[i] = helperGetTensorPointerRO<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); } roff += fused->numOutputs; for (int i = 0; i < fused->numInputs; i++) { input_grad_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); 
input_grad_ptr[i] = helperGetTensorPointerRW<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); assert(input_grad_domain[i] == input_domain[i]); } roff += fused->numInputs; for (int i = 0; i < fused->numWeights; i++) { weight_grad_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); weight_grad_ptr[i] = helperGetTensorPointerRW<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); assert(weight_grad_domain[i].get_volume() == weight_domain[i].get_volume()); } roff += fused->numWeights; for (int i = 0; i < fused->numOutputs; i++) { output_grad_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); output_grad_ptr[i] = helperGetTensorPointerRW<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); assert(output_grad_domain[i] == output_domain[i]); } roff += fused->numOutputs; // Assert that all meta share the same dnn/blas handler int start = 0; for (start = 0; start < fused->numOperators; start++) if (metas->meta[start] != NULL) break; for (int op = start+1; op < fused->numOperators; op++) if (metas->meta[op] != NULL) { assert(metas->meta[start]->handle.blas == metas->meta[op]->handle.blas); assert(metas->meta[start]->handle.dnn == metas->meta[op]->handle.dnn); } #ifndef DISABLE_LEGION_CUDA_HIJACK if (start < fused->numOperators) { cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDA(cublasSetStream(metas->meta[start]->handle.blas, stream)); checkCUDNN(cudnnSetStream(metas->meta[start]->handle.dnn, stream)); } #endif int ioff = 0, woff = 0, ooff = 0; Domain my_id[MAX_NUM_INPUTS], my_grad_id[MAX_NUM_INPUTS]; Domain my_wd[MAX_NUM_WEIGHTS], my_grad_wd[MAX_NUM_WEIGHTS]; Domain my_od[MAX_NUM_OUTPUTS], my_grad_od[MAX_NUM_OUTPUTS]; const float* my_ip[MAX_NUM_INPUTS]; const float* my_wp[MAX_NUM_WEIGHTS]; const float* my_op[MAX_NUM_OUTPUTS]; float* my_grad_ip[MAX_NUM_INPUTS]; float* my_grad_wp[MAX_NUM_WEIGHTS]; float* my_grad_op[MAX_NUM_OUTPUTS]; // Do backpropagation in the reverse ordering for (int op = 0; op < fused->numOperators; op++) { ioff += fused->op_num_inputs[op]; woff += fused->op_num_weights[op]; ooff += fused->op_num_outputs[op]; } for (int op = fused->numOperators-1; op >= 0; op--) { ioff -= fused->op_num_inputs[op]; woff -= fused->op_num_weights[op]; ooff -= fused->op_num_outputs[op]; for (int i = 0; i < fused->op_num_inputs[op]; i++) { int my_off = fused->op_input_idx[i+ioff]; if (fused->op_input_source[i+ioff] == SOURCE_INPUT) { my_id[i] = input_domain[my_off]; my_ip[i] = input_ptr[my_off]; my_grad_id[i] = input_grad_domain[my_off]; my_grad_ip[i] = input_grad_ptr[my_off]; assert(my_grad_id[i] == my_id[i]); } else if (fused->op_input_source[i+ioff] == SOURCE_OUTPUT) { my_id[i] = output_domain[my_off]; my_ip[i] = output_ptr[my_off]; my_grad_id[i] = output_grad_domain[my_off]; my_grad_ip[i] = output_grad_ptr[my_off]; assert(my_grad_id[i] == my_id[i]); } else assert(false); } for (int i = 0; i < fused->op_num_weights[op]; i++) { assert(fused->op_weight_source[i+woff] == SOURCE_WEIGHT); my_wd[i] = weight_domain[fused->op_weight_idx[i+woff]]; my_wp[i] = weight_ptr[fused->op_weight_idx[i+woff]]; my_grad_wd[i] = weight_grad_domain[fused->op_weight_idx[i+woff]]; my_grad_wp[i] = weight_grad_ptr[fused->op_weight_idx[i+woff]]; assert(my_grad_wd[i].get_volume() == my_wd[i].get_volume()); } for (int i = 0; i < fused->op_num_outputs[op]; i++) { assert(fused->op_output_source[i+ooff] == SOURCE_OUTPUT); my_od[i] = 
output_domain[fused->op_output_idx[i+ooff]]; my_op[i] = output_ptr[fused->op_output_idx[i+ooff]]; my_grad_od[i] = output_grad_domain[fused->op_output_idx[i+ooff]]; my_grad_op[i] = output_grad_ptr[fused->op_output_idx[i+ooff]]; assert(my_grad_od[i] == my_od[i]); } switch (fused->op_op_type[op]) { case OP_CONCAT: { assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); ConcatMeta* m = (ConcatMeta*) metas->meta[op]; int num_inputs = fused->op_num_inputs[op]; Concat::backward_kernel(my_grad_op[0], my_grad_ip, num_inputs, m->axis, my_grad_od[0], my_grad_id); break; } case OP_CONV2D: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_dim() == 4); assert(my_wd[0].get_dim() == 4); assert(my_od[0].get_dim() == 4); Conv2DMeta* m = (Conv2DMeta*) metas->meta[op]; Conv2D::backward_kernel(m, my_ip[0], my_grad_ip[0], my_op[0], my_grad_op[0], my_wp[0], my_grad_wp[0], my_grad_wp[1]); break; } case OP_BATCHNORM: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_dim() == 4); assert(my_wd[0].get_dim() == 1); assert(my_wd[1].get_dim() == 1); assert(my_od[0].get_dim() == 4); BatchNormMeta* m = (BatchNormMeta*) metas->meta[op]; BatchNorm::backward_kernel(m, my_ip[0], my_grad_op[0], my_op[0], my_grad_ip[0], my_wp[0], my_grad_wp[0], my_grad_wp[1], my_od[0].get_volume()); break; } case OP_DROPOUT: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); DropoutMeta* m = (DropoutMeta*) metas->meta[op]; Dropout::backward_kernel(m, my_grad_op[0], my_grad_ip[0]); break; } case OP_LINEAR: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 2); assert(fused->op_num_outputs[op] == 1); Rect<2> kernel_rect = my_wd[0]; int in_dim = kernel_rect.hi[0] - kernel_rect.lo[0] + 1; int out_dim = kernel_rect.hi[1] - kernel_rect.lo[1] + 1; int batch_size = my_id[0].get_volume() / in_dim; assert(my_od[0].get_volume() == out_dim * batch_size); assert(my_id[0].get_volume() == in_dim * batch_size); assert(my_wd[1].get_volume() == out_dim); LinearMeta* m = (LinearMeta*) metas->meta[op]; Linear::backward_kernel(m, my_ip[0], my_grad_ip[0], my_op[0], my_grad_op[0], my_wp[0], my_grad_wp[0], my_grad_wp[1], in_dim, out_dim, batch_size); break; } case OP_BATCHMATMUL: { assert(fused->op_num_inputs[op] == 2); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); Domain out_domain = my_od[0]; Domain a_domain = my_id[0]; Domain b_domain = my_id[1]; // check dims int m = b_domain.hi()[0] - b_domain.lo()[0] + 1; assert(m == out_domain.hi()[0] - out_domain.lo()[0] + 1); int n = a_domain.hi()[1] - a_domain.lo()[1] + 1; assert(n == out_domain.hi()[1] - out_domain.lo()[1] + 1); int k = a_domain.hi()[0] - a_domain.lo()[0] + 1; assert(k == b_domain.hi()[1] - b_domain.lo()[1] + 1); assert(a_domain.get_dim() == b_domain.get_dim()); assert(a_domain.get_dim() == out_domain.get_dim()); int batch = 1; for (int i = 2; i < a_domain.get_dim(); i++) { int dim_size = a_domain.hi()[i] - a_domain.lo()[i] + 1; assert(dim_size == b_domain.hi()[i] - b_domain.lo()[i] + 1); assert(dim_size == out_domain.hi()[i] - out_domain.lo()[i] + 1); batch *= dim_size; } BatchMatmulMeta* meta = (BatchMatmulMeta*) metas->meta[op]; BatchMatmul::backward_kernel(meta, my_op[0], my_grad_op[0], my_ip[0], my_grad_ip[0], my_ip[1], my_grad_ip[1], NULL, m, n, k, batch); break; } case OP_EW_ADD: case OP_EW_SUB: case OP_EW_MUL: case OP_EW_DIV: { assert(fused->op_num_inputs[op] == 2); 
assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0] == my_id[1]); assert(my_id[0] == my_od[0]); ElementBinaryMeta* m = (ElementBinaryMeta*) metas->meta[op]; ElementBinary::backward_kernel(m, my_grad_op[0], my_ip[0], my_ip[1], my_grad_ip[0], my_grad_ip[1]); break; } case OP_RELU: case OP_SIGMOID: case OP_TANH: case OP_ELU: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0] == my_od[0]); ElementUnaryMeta* m = (ElementUnaryMeta*) metas->meta[op]; ElementUnary::backward_kernel(m, my_ip[0], my_grad_ip[0], my_op[0], my_grad_op[0], my_id[0].get_volume()); break; } case OP_POOL2D: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); //assert(my_id[0] == my_od[0]); Pool2DMeta* m = (Pool2DMeta*) metas->meta[op]; Pool2D::backward_kernel(m, my_ip[0], my_grad_ip[0], my_op[0], my_grad_op[0]); break; } case OP_FLAT: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_grad_id[0].get_volume() == my_grad_od[0].get_volume()); Flat::backward_kernel(my_grad_ip[0], my_grad_op[0], my_grad_id[0].get_volume()); break; } case OP_RESHAPE: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_grad_id[0].get_volume() == my_grad_od[0].get_volume()); Reshape::backward_kernel(my_grad_ip[0], my_grad_op[0], my_grad_id[0].get_volume()); break; } case OP_TRANSPOSE: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_grad_id[0].get_volume() == my_grad_od[0].get_volume()); TransposeMeta* m = (TransposeMeta*) metas->meta[op]; Transpose::backward_kernel(m, my_grad_ip[0], my_grad_op[0], my_grad_id[0], my_grad_od[0]); break; } default: assert(false && "Fusion currently does not support type"); } } assert(ioff == 0); assert(woff == 0); assert(ooff == 0); //for (int i = 0; i < fused->numWeights; i++) // print_tensor<float>(weight_grad_ptr[i], weight_grad_domain[i].get_volume(), "[Fused:backward:weight_grad]"); //for (int i = 0; i < fused->numInputs; i++) // print_tensor<float>(input_grad_ptr[i], input_grad_domain[i].get_volume(), "[Fused:backward:input_grad]"); //for (int i = 0; i < fused->numOutputs; i++) // print_tensor<float>(output_grad_ptr[i], output_grad_domain[i].get_volume(), "[Fused:backward:output_grad]"); } void FusedOp::backward(const FFModel& ff) { // Set iter_config iter_config = ff.iter_config; ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(FUSEDOP_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(FusedOp)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); int idx = 0; for (int i = 0; i < numInputs; i++) { launcher.add_region_requirement( RegionRequirement(input_lps[i], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[i].region)); launcher.add_field(idx++, FID_DATA); } for (int i = 0; i 
< numWeights; i++) { launcher.add_region_requirement( RegionRequirement(weights[i].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[i].region)); launcher.add_field(idx++, FID_DATA); } for (int i = 0; i < numOutputs; i++) { launcher.add_region_requirement( RegionRequirement(outputs[i].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[i].region)); launcher.add_field(idx++, FID_DATA); } for (int i = 0; i < numInputs; i++) { launcher.add_region_requirement( RegionRequirement(input_grad_lps[i], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[i].region_grad)); launcher.add_field(idx++, FID_DATA); } for (int i = 0; i < numWeights; i++) { launcher.add_region_requirement( RegionRequirement(weights[i].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[i].region_grad)); launcher.add_field(idx++, FID_DATA); } for (int i = 0; i < numOutputs; i++) { launcher.add_region_requirement( RegionRequirement(outputs[i].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[i].region_grad)); launcher.add_field(idx++, FID_DATA); } runtime->execute_index_space(ctx, launcher); } bool FusedOp::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { // The search should happen before fusion assert(false); return false; }
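The forward and backward tasks above apply one indexing scheme throughout: the flat regions[] list is split into inputs, weights, and outputs with a running offset (roff), and each fused operator then selects its own slice through op_input_source / op_*_idx using the running counters ioff/woff/ooff. The host-only sketch below replays that scheme on plain arrays. It is not FlexFlow code: the arrays are stand-ins named after the fields above, and the two-operator wiring (op1 consuming op0's output) is invented for illustration.

#include <cstdio>
#include <vector>

enum Source { SOURCE_INPUT, SOURCE_OUTPUT, SOURCE_WEIGHT };

int main() {
  // Two fused operators: op0 reads external input 0 and writes output 0,
  // op1 reads op0's output (an intra-fusion edge) and writes output 1.
  const int numInputs = 1, numWeights = 0, numOutputs = 2, numOperators = 2;
  const int    op_num_inputs[]   = { 1, 1 };
  const int    op_num_outputs[]  = { 1, 1 };
  const Source op_input_source[] = { SOURCE_INPUT, SOURCE_OUTPUT };
  const int    op_input_idx[]    = { 0, 0 };   // op1 consumes output[0]
  const int    op_output_idx[]   = { 0, 1 };

  // The task receives one flat list: inputs, then weights, then outputs.
  std::vector<float> in0(4, 1.0f), out0(4), out1(4);
  float* regions[] = { in0.data(), out0.data(), out1.data() };

  float* input_ptr[1];
  float* output_ptr[2];
  int roff = 0;
  for (int i = 0; i < numInputs; i++) input_ptr[i] = regions[i];
  roff += numInputs + numWeights;              // weights would sit between the two groups
  for (int i = 0; i < numOutputs; i++) output_ptr[i] = regions[i + roff];

  // Per-operator slices are resolved with running offsets, as in forward_task.
  int ioff = 0, ooff = 0;
  for (int op = 0; op < numOperators; op++) {
    const float* my_ip = (op_input_source[ioff] == SOURCE_INPUT)
                             ? input_ptr[op_input_idx[ioff]]
                             : output_ptr[op_input_idx[ioff]];
    float* my_op = output_ptr[op_output_idx[ooff]];
    for (int k = 0; k < 4; k++) my_op[k] = my_ip[k] + 1.0f;   // stand-in "kernel"
    ioff += op_num_inputs[op];
    ooff += op_num_outputs[op];
  }
  printf("out1[0] = %.1f\n", out1[0]);   // 3.0: in0 -> op0 -> op1
  return 0;
}

The backward task walks the same offsets in reverse: it first advances ioff/woff/ooff past all operators, then rewinds them operator by operator, which is why the final asserts check that all three counters return to zero.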
c4666b3c0bb98bb67e37749be48b0669b6c6bc51.hip
// !!! This is a file automatically generated by hipify!!!
//
// #CSCS CUDA Training
//
// #Example 11 - time per thread operations
//
// #Author Ugo Varetto
//
// #Goal: time operations with per-multiprocessor counters
//
// #Rationale: shows how to use the clock() call within kernels
//
// #Solution: use the clock() function on the GPU to time operations
//
// #Code: typical flow:
// 1) compute launch grid configuration
// 2) allocate data on host(cpu) and device(gpu)
// 3) copy data from host to device
// 4) launch kernel
// 5) read data back
// 6) consume data (in this case print result)
// 7) free memory
//
// #Compilation: nvcc -arch=sm_13 11_clock.cu -o clock
//
// #Execution: ./clock
//
// #Note: kernel invocations ( foo<<<...>>>(...) ) are *always* asynchronous and a call to
//        hipDeviceSynchronize() is required to wait for the end of kernel execution from
//        a host thread; in case of synchronous copy operations like hipMemcpy(...,hipMemcpyDeviceToHost)
//        kernel execution is guaranteed to be terminated before data are copied
//
// #Note: -arch=sm_13 allows the code to run on every card with hw architecture GT200 (gtx 2xx) or better
//
// #Note: -arch=sm_13 is the lowest architecture version that supports double precision

//#include <hip/hip_runtime.h> // automatically added by nvcc

#include <iostream>

typedef double real_t;

// time clock invocation
__global__ void time_clock( clock_t* clkout ) {
    const clock_t start = clock();
    const clock_t stop  = clock();
    *clkout = stop - start;
}

__device__ real_t foo() { return 2.0; }

// time operation
__global__ void time_op( clock_t* clkout, real_t* out, int N, clock_t clock_offset ) {
    clock_t acc = 0;
    __shared__ real_t local[ 1 ];
    for( int i = 0; i != N; ++i ) {
        const clock_t start = clock();
        local[ 0 ] = foo();
        const clock_t stop = clock();
        acc += stop - start;
    }
    *clkout = acc / N - clock_offset;
    *out = local[ 0 ]; // write the timed result out so the call to foo() is not optimized away
}

//------------------------------------------------------------------------------
int main( int , char** ) {
    clock_t* dev_clk = 0;
    real_t*  dev_out = 0;
    // allocate memory for clock and data output
    hipMalloc( &dev_clk, sizeof( clock_t ) );
    hipMalloc( &dev_out, sizeof( real_t ) );
    // compute overhead of invoking clock()
    hipLaunchKernelGGL(( time_clock), dim3(1),dim3(1), 0, 0, dev_clk );
    hipDeviceSynchronize();
    clock_t clk_offset = clock_t();
    hipMemcpy( &clk_offset, dev_clk, sizeof( clock_t ), hipMemcpyDeviceToHost );
    std::cout << "Clock overhead: " << clk_offset << std::endl;
    // time operation
    hipLaunchKernelGGL(( time_op), dim3(1),dim3(1), 0, 0, dev_clk, dev_out, 100, clk_offset );
    hipDeviceSynchronize();
    clock_t clk = clock_t();
    hipMemcpy( &clk, dev_clk, sizeof( clock_t ), hipMemcpyDeviceToHost );
    // report
    std::cout << "Clock cycles: " << clk << std::endl;
    // free resources
    hipFree( dev_clk );
    hipFree( dev_out );
    return 0;
}
c4666b3c0bb98bb67e37749be48b0669b6c6bc51.cu
// #CSCS CUDA Training
//
// #Example 11 - time per thread operations
//
// #Author Ugo Varetto
//
// #Goal: time operations with per-multiprocessor counters
//
// #Rationale: shows how to use the clock() call within kernels
//
// #Solution: use the clock() function on the GPU to time operations
//
// #Code: typical flow:
// 1) compute launch grid configuration
// 2) allocate data on host(cpu) and device(gpu)
// 3) copy data from host to device
// 4) launch kernel
// 5) read data back
// 6) consume data (in this case print result)
// 7) free memory
//
// #Compilation: nvcc -arch=sm_13 11_clock.cu -o clock
//
// #Execution: ./clock
//
// #Note: kernel invocations ( foo<<<...>>>(...) ) are *always* asynchronous and a call to
//        cudaThreadSynchronize() is required to wait for the end of kernel execution from
//        a host thread; in case of synchronous copy operations like cudaMemcpy(...,cudaMemcpyDeviceToHost)
//        kernel execution is guaranteed to be terminated before data are copied
//
// #Note: -arch=sm_13 allows the code to run on every card with hw architecture GT200 (gtx 2xx) or better
//
// #Note: -arch=sm_13 is the lowest architecture version that supports double precision

//#include <cuda_runtime.h> // automatically added by nvcc

#include <iostream>

typedef double real_t;

// time clock invocation
__global__ void time_clock( clock_t* clkout ) {
    const clock_t start = clock();
    const clock_t stop  = clock();
    *clkout = stop - start;
}

__device__ real_t foo() { return 2.0; }

// time operation
__global__ void time_op( clock_t* clkout, real_t* out, int N, clock_t clock_offset ) {
    clock_t acc = 0;
    __shared__ real_t local[ 1 ];
    for( int i = 0; i != N; ++i ) {
        const clock_t start = clock();
        local[ 0 ] = foo();
        const clock_t stop = clock();
        acc += stop - start;
    }
    *clkout = acc / N - clock_offset;
    *out = local[ 0 ]; // write the timed result out so the call to foo() is not optimized away
}

//------------------------------------------------------------------------------
int main( int , char** ) {
    clock_t* dev_clk = 0;
    real_t*  dev_out = 0;
    // allocate memory for clock and data output
    cudaMalloc( &dev_clk, sizeof( clock_t ) );
    cudaMalloc( &dev_out, sizeof( real_t ) );
    // compute overhead of invoking clock()
    time_clock<<<1,1>>>( dev_clk );
    cudaThreadSynchronize();
    clock_t clk_offset = clock_t();
    cudaMemcpy( &clk_offset, dev_clk, sizeof( clock_t ), cudaMemcpyDeviceToHost );
    std::cout << "Clock overhead: " << clk_offset << std::endl;
    // time operation
    time_op<<<1,1>>>( dev_clk, dev_out, 100, clk_offset );
    cudaThreadSynchronize();
    clock_t clk = clock_t();
    cudaMemcpy( &clk, dev_clk, sizeof( clock_t ), cudaMemcpyDeviceToHost );
    // report
    std::cout << "Clock cycles: " << clk << std::endl;
    // free resources
    cudaFree( dev_clk );
    cudaFree( dev_out );
    return 0;
}
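The only non-mechanical difference between the two files in this pair is the kernel-launch syntax that hipify rewrites. The sketch below isolates just that mapping; it is a generic illustration (the scale kernel is a placeholder, not taken from the example above) and builds with hipcc.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale( float* x, float a, int n ) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if( i < n ) x[i] *= a;
}

int main() {
    const int n = 256;
    float* d_x = nullptr;
    hipMalloc( &d_x, n * sizeof( float ) );
    hipMemset( d_x, 0, n * sizeof( float ) );

    // CUDA form (what the .cu file contains):
    //     scale<<< dim3(1), dim3(256), 0, 0 >>>( d_x, 2.0f, n );
    // hipify output: kernel, grid, block, dynamic shared-memory bytes, stream, then the arguments.
    hipLaunchKernelGGL( scale, dim3(1), dim3(256), 0, 0, d_x, 2.0f, n );
    hipDeviceSynchronize();

    hipFree( d_x );
    printf( "done\n" );
    return 0;
}

The remaining edits in the pair are one-to-one API renames (cudaMalloc -> hipMalloc, cudaMemcpy -> hipMemcpy, cudaFree -> hipFree), with the deprecated cudaThreadSynchronize mapped onto hipDeviceSynchronize.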
2df9465c2e9b40777d3cbf269addef5857f46951.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @author Azzam Haidar @author Ahmad Ahmad @generated from magmablas/zpotf2_kernels.cu, normal z -> d, Tue Aug 30 09:38:39 2016 */ #include "magma_internal.h" #include "batched_kernel_param.h" #define PRECISION_d #if defined(VERSION31) #define ENABLE_COND1 #define ENABLE_COND2 #endif #define MAX_NTCOL 8 #if defined(PRECISION_s) #define NTCOL2 (4) #define NTCOL1 (8) #elif defined(PRECISION_d) #define NTCOL2 (2) #define NTCOL1 (4) #else #define NTCOL2 (1) #define NTCOL1 (1) #endif #include "dpotf2_devicesfunc.cuh" /******************************************************************************/ __global__ void dpotf2_smlpout_fixwidth_kernel_batched(int m, double **dA_array, int lda, int localstep, int gbstep, magma_int_t *info_array, const int batchCount) { const int batchid = blockIdx.z * blockDim.y + threadIdx.y; if (batchid >= batchCount) return; dpotf2_smlpout_fixwidth_device(m, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid])); } /******************************************************************************/ __global__ void dpotf2_smlpout_anywidth_kernel_batched(int m, int n, double **dA_array, int lda, int localstep, int gbstep, magma_int_t *info_array, const int batchCount) { const int batchid = blockIdx.z * blockDim.y + threadIdx.y; if (batchid >= batchCount) return; dpotf2_smlpout_anywidth_device(m, n, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid])); } /******************************************************************************/ extern "C" magma_int_t magma_dpotrf_lpout_batched( magma_uplo_t uplo, magma_int_t n, double **dA_array, magma_int_t lda, magma_int_t gbstep, magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t m = n; magma_int_t arginfo = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { arginfo = -1; } else if (m < 0 || n < 0 ) { arginfo = -2; } else if (lda < max(1,m)) { arginfo = -4; } else if (m < n) { arginfo = -10; } if (uplo == MagmaUpper) { fprintf( stderr, "%s: uplo=upper is not yet implemented\n", __func__ ); arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } // Quick return if possible if (m == 0 || n == 0) { return arginfo; } magma_int_t roundup_m = m; // rounding up need more investigation since it coul dmodify the matrix out of its bound //magma_int_t m8 = magma_roundup( m, 8 ); //magma_int_t roundup_m = m8 > lda ? m : m8; //magma_int_t m32 = magma_roundup( m, 32 ); //magma_int_t roundup_m = m32 > lda ? 
m : m32; magma_int_t ib, rows; for (magma_int_t j = 0; j < n; j += POTF2_NB) { ib = min(POTF2_NB, n-j); rows = roundup_m-j; // tuning ntcol magma_int_t ntcol; // for z precision, the best tuning is at NTCOL = 1 for all sizes if (rows > 64) ntcol = 1; else if (rows > 32) ntcol = NTCOL2; else ntcol = NTCOL1; // end of tuning ntcol const magma_int_t nTB = magma_ceildiv( batchCount, ntcol ); dim3 dimGrid(1, 1, nTB); magma_int_t nbth = rows; magma_int_t shared_mem_size = ntcol * (sizeof(double)*(nbth+POTF2_NB)*POTF2_NB); dim3 threads(nbth, ntcol); if (shared_mem_size > 47000) { arginfo = -33; magma_xerbla( __func__, -(arginfo) ); return arginfo; } if (ib == POTF2_NB) { hipLaunchKernelGGL(( dpotf2_smlpout_fixwidth_kernel_batched) , dim3(dimGrid), dim3(threads), shared_mem_size, queue->cuda_stream() , rows, dA_array, lda, j, gbstep, info_array, batchCount); } else { hipLaunchKernelGGL(( dpotf2_smlpout_anywidth_kernel_batched) , dim3(dimGrid), dim3(threads), shared_mem_size, queue->cuda_stream() , rows, ib, dA_array, lda, j, gbstep, info_array, batchCount); } } return arginfo; }
2df9465c2e9b40777d3cbf269addef5857f46951.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @author Azzam Haidar @author Ahmad Ahmad @generated from magmablas/zpotf2_kernels.cu, normal z -> d, Tue Aug 30 09:38:39 2016 */ #include "magma_internal.h" #include "batched_kernel_param.h" #define PRECISION_d #if defined(VERSION31) #define ENABLE_COND1 #define ENABLE_COND2 #endif #define MAX_NTCOL 8 #if defined(PRECISION_s) #define NTCOL2 (4) #define NTCOL1 (8) #elif defined(PRECISION_d) #define NTCOL2 (2) #define NTCOL1 (4) #else #define NTCOL2 (1) #define NTCOL1 (1) #endif #include "dpotf2_devicesfunc.cuh" /******************************************************************************/ __global__ void dpotf2_smlpout_fixwidth_kernel_batched(int m, double **dA_array, int lda, int localstep, int gbstep, magma_int_t *info_array, const int batchCount) { const int batchid = blockIdx.z * blockDim.y + threadIdx.y; if (batchid >= batchCount) return; dpotf2_smlpout_fixwidth_device(m, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid])); } /******************************************************************************/ __global__ void dpotf2_smlpout_anywidth_kernel_batched(int m, int n, double **dA_array, int lda, int localstep, int gbstep, magma_int_t *info_array, const int batchCount) { const int batchid = blockIdx.z * blockDim.y + threadIdx.y; if (batchid >= batchCount) return; dpotf2_smlpout_anywidth_device(m, n, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid])); } /******************************************************************************/ extern "C" magma_int_t magma_dpotrf_lpout_batched( magma_uplo_t uplo, magma_int_t n, double **dA_array, magma_int_t lda, magma_int_t gbstep, magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t m = n; magma_int_t arginfo = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { arginfo = -1; } else if (m < 0 || n < 0 ) { arginfo = -2; } else if (lda < max(1,m)) { arginfo = -4; } else if (m < n) { arginfo = -10; } if (uplo == MagmaUpper) { fprintf( stderr, "%s: uplo=upper is not yet implemented\n", __func__ ); arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } // Quick return if possible if (m == 0 || n == 0) { return arginfo; } magma_int_t roundup_m = m; // rounding up need more investigation since it coul dmodify the matrix out of its bound //magma_int_t m8 = magma_roundup( m, 8 ); //magma_int_t roundup_m = m8 > lda ? m : m8; //magma_int_t m32 = magma_roundup( m, 32 ); //magma_int_t roundup_m = m32 > lda ? 
m : m32; magma_int_t ib, rows; for (magma_int_t j = 0; j < n; j += POTF2_NB) { ib = min(POTF2_NB, n-j); rows = roundup_m-j; // tuning ntcol magma_int_t ntcol; // for z precision, the best tuning is at NTCOL = 1 for all sizes if (rows > 64) ntcol = 1; else if (rows > 32) ntcol = NTCOL2; else ntcol = NTCOL1; // end of tuning ntcol const magma_int_t nTB = magma_ceildiv( batchCount, ntcol ); dim3 dimGrid(1, 1, nTB); magma_int_t nbth = rows; magma_int_t shared_mem_size = ntcol * (sizeof(double)*(nbth+POTF2_NB)*POTF2_NB); dim3 threads(nbth, ntcol); if (shared_mem_size > 47000) { arginfo = -33; magma_xerbla( __func__, -(arginfo) ); return arginfo; } if (ib == POTF2_NB) { dpotf2_smlpout_fixwidth_kernel_batched <<< dimGrid, threads, shared_mem_size, queue->cuda_stream() >>> (rows, dA_array, lda, j, gbstep, info_array, batchCount); } else { dpotf2_smlpout_anywidth_kernel_batched <<< dimGrid, threads, shared_mem_size, queue->cuda_stream() >>> (rows, ib, dA_array, lda, j, gbstep, info_array, batchCount); } } return arginfo; }
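The launcher above rejects panels whose shared-memory footprint exceeds a hard-coded 47000-byte budget. A runtime alternative is to query the per-block limit from the device. The sketch below is not MAGMA code; POTF2_NB = 8 and the panel size are assumptions used only to make the arithmetic concrete.

#include <cstdio>
#include <cuda_runtime.h>

int main() {
    int device = 0, max_shmem = 0;
    cudaGetDevice( &device );
    cudaDeviceGetAttribute( &max_shmem, cudaDevAttrMaxSharedMemoryPerBlock, device );

    const int POTF2_NB = 8;            // assumed blocking factor
    const int rows = 128, ntcol = 1;   // one example panel configuration
    // same footprint formula as in the launcher above
    size_t shared_mem_size = ntcol * sizeof(double) * (rows + POTF2_NB) * POTF2_NB;

    if ( shared_mem_size > (size_t)max_shmem ) {
        printf( "panel needs %zu B of shared memory, device allows %d B per block\n",
                shared_mem_size, max_shmem );
        return 1;                      // a caller would fall back to another code path
    }
    printf( "ok: %zu B <= %d B\n", shared_mem_size, max_shmem );
    return 0;
}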
cb2c60b19bcecba2eb7be3a4eb0fc0811e562e69.hip
// !!! This is a file automatically generated by hipify!!! #include "Doc.cuh" Document::Document(string argFilePrefix, int argNumChunks, int argMaxTLLength, int argmaxDocLength, int argWordLength) { filePrefix = argFilePrefix; numChunks = argNumChunks; maxTLLength = argMaxTLLength; maxDocLength = argmaxDocLength; wordLength = argWordLength; perplexityMid = new float[GridDim]; perplexity = new float[maxTLLength]; } void Document::loadDocument() { TLLengthVec = new int[numChunks]; docLengthVec = new int[numChunks]; numOfTokenVecD = new int[numChunks]; numOfTokenVecS = new int[numChunks]; ifstream docLength((filePrefix + string("/docLength.txt")).c_str(), ios::binary);//store max Doc and DT length ifstream TLLength((filePrefix + string("/TLLength.txt")).c_str(), ios::binary); ifstream TLSplit((filePrefix + string("/TLSplit.txt")).c_str(), ios::binary); for (int chunkId = 0; chunkId < numChunks; chunkId++) { TLLength >> TLLengthVec[chunkId]; docLength >> docLengthVec[chunkId]; TLSplit >> numOfTokenVecD[chunkId] >> numOfTokenVecS[chunkId]; totalNumOfTokens += TLLengthVec[chunkId]; DocChunk tmpDocChunk(TLLengthVec[chunkId], docLengthVec[chunkId], wordLength); tmpDocChunk.CPUMemSet(); tmpDocChunk.loadChunk(filePrefix, chunkId); docChunkVec.push_back(tmpDocChunk); } printf("total num of tokens:%f\n", totalNumOfTokens); printf("All chunks loaded!"); docLength.close(); TLLength.close(); } void Document::CPU2GPUPerplexity() { memset(perplexityMid, 0, GridDim * sizeof(float)); hipMemcpy(devicePerplexityMid, perplexityMid, GridDim* sizeof(float), hipMemcpyHostToDevice); /*hipMemset(devicePerplexity,0,maxTLLength*sizeof(float));*/ } void Document::GPU2CPUPerplexity() { hipMemcpy(perplexityMid, devicePerplexityMid, (GridDim) * sizeof(float), hipMemcpyDeviceToHost); //hipMemcpy(perplexity, devicePerplexity, maxTLLength*sizeof(float), hipMemcpyDeviceToHost); sumPerplexity = 0.0; for (int i = 0; i < GridDim; i++) { //printf("Perplexity:%f \n", perplexityMid[i]); sumPerplexity += perplexityMid[i]/ 467723.0; } //printf("Parallel Perplexity:%f \n", sumPerplexity); } void Document::CPU2DiskPerplexity(string argFilePrefix) { ofstream OutPutPerplexity((argFilePrefix + string("/Perplexity.txt")).c_str(), ios::binary); for (int i = 0; i < maxTLLength; i++) { OutPutPerplexity << perplexity[i] << "\n"; } OutPutPerplexity.close(); } void Document::GPUMemAllocate() { hipMalloc((void**)&deviceTLTopic, (maxTLLength) * sizeof(unsigned short int)); hipMalloc((void**)&deviceTLDocCount, (maxDocLength) * sizeof(int)); hipMalloc((void**)&deviceTLDocOffset, (maxDocLength) * sizeof(int)); hipMalloc((void**)&deviceTLWordCount, (wordLength) * sizeof(int)); hipMalloc((void**)&deviceTLWordOffset, (wordLength) * sizeof(int)); hipMalloc((void**)&deviceMapWord2Doc, (maxTLLength) * sizeof(int)); hipMalloc((void**)&deviceMapDoc2Word, (maxTLLength) * sizeof(int)); hipMalloc((void**)&devicePerplexity, (maxTLLength) * sizeof(float)); hipMalloc((void**)&devicePerplexityMid, sizeof(float)*GridDim); hipMalloc((void **)&d_blockCounter, sizeof(int)*(1)); hipMalloc((void **)&d_warpCounter, sizeof(int)*(1)); hipMalloc((void **)&d_dense, sizeof(int)*(GridDim*BlockDim*K/32)); hipMalloc((void **)&deviceWTHeadDense, sizeof(float)*(GridDim*K)); TLMemory = ((3 * maxTLLength + 2 * maxDocLength + 2 * wordLength + GridDim*K) * sizeof(int) + (maxTLLength + GridDim*BlockDim / 32 + GridDim*K) * sizeof(float))/ 1000000000.0; printf("Token list memory usage:%f GB\n", TLMemory); } void Document::CPU2GPU(int argChunkId) { hipMemcpy(deviceTLTopic, 
docChunkVec[argChunkId].TLTopic, (TLLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyHostToDevice); hipMemcpy(deviceTLDocCount, docChunkVec[argChunkId].TLDocCount, (docLengthVec[argChunkId]) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(deviceTLDocOffset, docChunkVec[argChunkId].TLDocOffset, (docLengthVec[argChunkId]) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(deviceTLWordCount, docChunkVec[argChunkId].TLWordCount, (wordLength) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(deviceTLWordOffset, docChunkVec[argChunkId].TLWordOffset, (wordLength) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(deviceMapWord2Doc, docChunkVec[argChunkId].mapWord2Doc, (TLLengthVec[argChunkId]) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(deviceMapDoc2Word, docChunkVec[argChunkId].mapDoc2Word, (TLLengthVec[argChunkId]) * sizeof(int), hipMemcpyHostToDevice); } void Document::GPU2CPU(int argChunkId) { hipMemcpy(docChunkVec[argChunkId].TLTopic, deviceTLTopic, (TLLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyDeviceToHost); }
cb2c60b19bcecba2eb7be3a4eb0fc0811e562e69.cu
#include "Doc.cuh" Document::Document(string argFilePrefix, int argNumChunks, int argMaxTLLength, int argmaxDocLength, int argWordLength) { filePrefix = argFilePrefix; numChunks = argNumChunks; maxTLLength = argMaxTLLength; maxDocLength = argmaxDocLength; wordLength = argWordLength; perplexityMid = new float[GridDim]; perplexity = new float[maxTLLength]; } void Document::loadDocument() { TLLengthVec = new int[numChunks]; docLengthVec = new int[numChunks]; numOfTokenVecD = new int[numChunks]; numOfTokenVecS = new int[numChunks]; ifstream docLength((filePrefix + string("/docLength.txt")).c_str(), ios::binary);//store max Doc and DT length ifstream TLLength((filePrefix + string("/TLLength.txt")).c_str(), ios::binary); ifstream TLSplit((filePrefix + string("/TLSplit.txt")).c_str(), ios::binary); for (int chunkId = 0; chunkId < numChunks; chunkId++) { TLLength >> TLLengthVec[chunkId]; docLength >> docLengthVec[chunkId]; TLSplit >> numOfTokenVecD[chunkId] >> numOfTokenVecS[chunkId]; totalNumOfTokens += TLLengthVec[chunkId]; DocChunk tmpDocChunk(TLLengthVec[chunkId], docLengthVec[chunkId], wordLength); tmpDocChunk.CPUMemSet(); tmpDocChunk.loadChunk(filePrefix, chunkId); docChunkVec.push_back(tmpDocChunk); } printf("total num of tokens:%f\n", totalNumOfTokens); printf("All chunks loaded!"); docLength.close(); TLLength.close(); } void Document::CPU2GPUPerplexity() { memset(perplexityMid, 0, GridDim * sizeof(float)); cudaMemcpy(devicePerplexityMid, perplexityMid, GridDim* sizeof(float), cudaMemcpyHostToDevice); /*cudaMemset(devicePerplexity,0,maxTLLength*sizeof(float));*/ } void Document::GPU2CPUPerplexity() { cudaMemcpy(perplexityMid, devicePerplexityMid, (GridDim) * sizeof(float), cudaMemcpyDeviceToHost); //cudaMemcpy(perplexity, devicePerplexity, maxTLLength*sizeof(float), cudaMemcpyDeviceToHost); sumPerplexity = 0.0; for (int i = 0; i < GridDim; i++) { //printf("Perplexity:%f \n", perplexityMid[i]); sumPerplexity += perplexityMid[i]/ 467723.0; } //printf("Parallel Perplexity:%f \n", sumPerplexity); } void Document::CPU2DiskPerplexity(string argFilePrefix) { ofstream OutPutPerplexity((argFilePrefix + string("/Perplexity.txt")).c_str(), ios::binary); for (int i = 0; i < maxTLLength; i++) { OutPutPerplexity << perplexity[i] << "\n"; } OutPutPerplexity.close(); } void Document::GPUMemAllocate() { cudaMalloc((void**)&deviceTLTopic, (maxTLLength) * sizeof(unsigned short int)); cudaMalloc((void**)&deviceTLDocCount, (maxDocLength) * sizeof(int)); cudaMalloc((void**)&deviceTLDocOffset, (maxDocLength) * sizeof(int)); cudaMalloc((void**)&deviceTLWordCount, (wordLength) * sizeof(int)); cudaMalloc((void**)&deviceTLWordOffset, (wordLength) * sizeof(int)); cudaMalloc((void**)&deviceMapWord2Doc, (maxTLLength) * sizeof(int)); cudaMalloc((void**)&deviceMapDoc2Word, (maxTLLength) * sizeof(int)); cudaMalloc((void**)&devicePerplexity, (maxTLLength) * sizeof(float)); cudaMalloc((void**)&devicePerplexityMid, sizeof(float)*GridDim); cudaMalloc((void **)&d_blockCounter, sizeof(int)*(1)); cudaMalloc((void **)&d_warpCounter, sizeof(int)*(1)); cudaMalloc((void **)&d_dense, sizeof(int)*(GridDim*BlockDim*K/32)); cudaMalloc((void **)&deviceWTHeadDense, sizeof(float)*(GridDim*K)); TLMemory = ((3 * maxTLLength + 2 * maxDocLength + 2 * wordLength + GridDim*K) * sizeof(int) + (maxTLLength + GridDim*BlockDim / 32 + GridDim*K) * sizeof(float))/ 1000000000.0; printf("Token list memory usage:%f GB\n", TLMemory); } void Document::CPU2GPU(int argChunkId) { cudaMemcpy(deviceTLTopic, docChunkVec[argChunkId].TLTopic, 
(TLLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyHostToDevice); cudaMemcpy(deviceTLDocCount, docChunkVec[argChunkId].TLDocCount, (docLengthVec[argChunkId]) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(deviceTLDocOffset, docChunkVec[argChunkId].TLDocOffset, (docLengthVec[argChunkId]) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(deviceTLWordCount, docChunkVec[argChunkId].TLWordCount, (wordLength) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(deviceTLWordOffset, docChunkVec[argChunkId].TLWordOffset, (wordLength) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(deviceMapWord2Doc, docChunkVec[argChunkId].mapWord2Doc, (TLLengthVec[argChunkId]) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(deviceMapDoc2Word, docChunkVec[argChunkId].mapDoc2Word, (TLLengthVec[argChunkId]) * sizeof(int), cudaMemcpyHostToDevice); } void Document::GPU2CPU(int argChunkId) { cudaMemcpy(docChunkVec[argChunkId].TLTopic, deviceTLTopic, (TLLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyDeviceToHost); }
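The cudaMalloc/cudaMemcpy calls in GPUMemAllocate and CPU2GPU above discard their return codes. A common defensive pattern is a small checking macro; the following is a generic sketch rather than part of Doc.cu, and the CUDA_CHECK name is chosen here only for illustration.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error %s at %s:%d\n",               \
                    cudaGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

int main() {
    float* d_buf = nullptr;
    CUDA_CHECK(cudaMalloc(&d_buf, 1024 * sizeof(float)));   // fails loudly instead of silently
    CUDA_CHECK(cudaMemset(d_buf, 0, 1024 * sizeof(float)));
    CUDA_CHECK(cudaFree(d_buf));
    return 0;
}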
167d72a933ff2bc7eacf792da18ede3626c5e68b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include "hip/hip_runtime.h" #include <assert.h> #define N 2//64 __global__ void foo(float* A) { __requires(*A == 0); if(blockIdx.x == 0) A[threadIdx.x] = 50.f; }
167d72a933ff2bc7eacf792da18ede3626c5e68b.cu
#include <stdio.h> #include <stdlib.h> #include "cuda.h" #include <assert.h> #define N 2//64 __global__ void foo(float* A) { __requires(*A == 0); if(blockIdx.x == 0) A[threadIdx.x] = 50.f; }
008d57179bffe7187ddc6c36140c1e4f6b6098d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim3_poisson_kernel_populate; int xdim3_poisson_kernel_populate_h = -1; int ydim3_poisson_kernel_populate_h = -1; __constant__ int xdim4_poisson_kernel_populate; int xdim4_poisson_kernel_populate_h = -1; int ydim4_poisson_kernel_populate_h = -1; __constant__ int xdim5_poisson_kernel_populate; int xdim5_poisson_kernel_populate_h = -1; int ydim5_poisson_kernel_populate_h = -1; #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #define OPS_ACC3(x, y) (x + xdim3_poisson_kernel_populate * (y)) #define OPS_ACC4(x, y) (x + xdim4_poisson_kernel_populate * (y)) #define OPS_ACC5(x, y) (x + xdim5_poisson_kernel_populate * (y)) // user function __device__ void poisson_kernel_populate_gpu(const int *dispx, const int *dispy, const int *idx, double *u, double *f, double *ref) { double x = dx * (double)(idx[0] + dispx[0]); double y = dy * (double)(idx[1] + dispy[0]); u[OPS_ACC3(0, 0)] = sin(M_PI * x) * cos(2.0 * M_PI * y); f[OPS_ACC4(0, 0)] = -5.0 * M_PI * M_PI * sin(M_PI * x) * cos(2.0 * M_PI * y); ref[OPS_ACC5(0, 0)] = sin(M_PI * x) * cos(2.0 * M_PI * y); } #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 __global__ void ops_poisson_kernel_populate(const int arg0, const int arg1, int arg_idx0, int arg_idx1, double *__restrict arg3, double *__restrict arg4, double *__restrict arg5, int size0, int size1) { int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; int arg_idx[2]; arg_idx[0] = arg_idx0 + idx_x; arg_idx[1] = arg_idx1 + idx_y; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_poisson_kernel_populate; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_poisson_kernel_populate; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_poisson_kernel_populate; if (idx_x < size0 && idx_y < size1) { poisson_kernel_populate_gpu(&arg0, &arg1, arg_idx, arg3, arg4, arg5); } } // host stub function void ops_par_loop_poisson_kernel_populate(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { // Timing double t1, t2, c1, c2; ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 6, range, 0)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(0, "poisson_kernel_populate"); OPS_kernels[0].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 2; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 2; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int arg_idx[2]; #ifdef OPS_MPI arg_idx[0] = sb->decomp_disp[0] + start[0]; arg_idx[1] = sb->decomp_disp[1] + start[1]; #else arg_idx[0] = start[0]; arg_idx[1] = start[1]; 
#endif int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; int xdim5 = args[5].dat->size[0]; if (xdim3 != xdim3_poisson_kernel_populate_h || xdim4 != xdim4_poisson_kernel_populate_h || xdim5 != xdim5_poisson_kernel_populate_h) { hipMemcpyToSymbol(xdim3_poisson_kernel_populate, &xdim3, sizeof(int)); xdim3_poisson_kernel_populate_h = xdim3; hipMemcpyToSymbol(xdim4_poisson_kernel_populate, &xdim4, sizeof(int)); xdim4_poisson_kernel_populate_h = xdim4; hipMemcpyToSymbol(xdim5_poisson_kernel_populate, &xdim5, sizeof(int)); xdim5_poisson_kernel_populate_h = xdim5; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; char *p_a[6]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); p_a[5] = (char *)args[5].data_d + base5; ops_H_D_exchanges_device(args, 6); ops_halo_exchanges(args, 6, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[0].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_poisson_kernel_populate), dim3(grid), dim3(tblock), 0, 0, *(int *)arg0.data, *(int *)arg1.data, arg_idx[0], arg_idx[1], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], x_size, y_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[0].time += t1 - t2; } ops_set_dirtybit_device(args, 6); ops_set_halo_dirtybit3(&args[3], range); ops_set_halo_dirtybit3(&args[4], range); ops_set_halo_dirtybit3(&args[5], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[0].mpi_time += t2 - t1; OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg5); } }
008d57179bffe7187ddc6c36140c1e4f6b6098d8.cu
// // auto-generated by ops.py // __constant__ int xdim3_poisson_kernel_populate; int xdim3_poisson_kernel_populate_h = -1; int ydim3_poisson_kernel_populate_h = -1; __constant__ int xdim4_poisson_kernel_populate; int xdim4_poisson_kernel_populate_h = -1; int ydim4_poisson_kernel_populate_h = -1; __constant__ int xdim5_poisson_kernel_populate; int xdim5_poisson_kernel_populate_h = -1; int ydim5_poisson_kernel_populate_h = -1; #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #define OPS_ACC3(x, y) (x + xdim3_poisson_kernel_populate * (y)) #define OPS_ACC4(x, y) (x + xdim4_poisson_kernel_populate * (y)) #define OPS_ACC5(x, y) (x + xdim5_poisson_kernel_populate * (y)) // user function __device__ void poisson_kernel_populate_gpu(const int *dispx, const int *dispy, const int *idx, double *u, double *f, double *ref) { double x = dx * (double)(idx[0] + dispx[0]); double y = dy * (double)(idx[1] + dispy[0]); u[OPS_ACC3(0, 0)] = sin(M_PI * x) * cos(2.0 * M_PI * y); f[OPS_ACC4(0, 0)] = -5.0 * M_PI * M_PI * sin(M_PI * x) * cos(2.0 * M_PI * y); ref[OPS_ACC5(0, 0)] = sin(M_PI * x) * cos(2.0 * M_PI * y); } #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 __global__ void ops_poisson_kernel_populate(const int arg0, const int arg1, int arg_idx0, int arg_idx1, double *__restrict arg3, double *__restrict arg4, double *__restrict arg5, int size0, int size1) { int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; int arg_idx[2]; arg_idx[0] = arg_idx0 + idx_x; arg_idx[1] = arg_idx1 + idx_y; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_poisson_kernel_populate; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_poisson_kernel_populate; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_poisson_kernel_populate; if (idx_x < size0 && idx_y < size1) { poisson_kernel_populate_gpu(&arg0, &arg1, arg_idx, arg3, arg4, arg5); } } // host stub function void ops_par_loop_poisson_kernel_populate(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { // Timing double t1, t2, c1, c2; ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 6, range, 0)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(0, "poisson_kernel_populate"); OPS_kernels[0].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 2; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 2; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int arg_idx[2]; #ifdef OPS_MPI arg_idx[0] = sb->decomp_disp[0] + start[0]; arg_idx[1] = sb->decomp_disp[1] + start[1]; #else arg_idx[0] = start[0]; arg_idx[1] = start[1]; #endif int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; int xdim5 = 
args[5].dat->size[0]; if (xdim3 != xdim3_poisson_kernel_populate_h || xdim4 != xdim4_poisson_kernel_populate_h || xdim5 != xdim5_poisson_kernel_populate_h) { cudaMemcpyToSymbol(xdim3_poisson_kernel_populate, &xdim3, sizeof(int)); xdim3_poisson_kernel_populate_h = xdim3; cudaMemcpyToSymbol(xdim4_poisson_kernel_populate, &xdim4, sizeof(int)); xdim4_poisson_kernel_populate_h = xdim4; cudaMemcpyToSymbol(xdim5_poisson_kernel_populate, &xdim5, sizeof(int)); xdim5_poisson_kernel_populate_h = xdim5; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; char *p_a[6]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); p_a[5] = (char *)args[5].data_d + base5; ops_H_D_exchanges_device(args, 6); ops_halo_exchanges(args, 6, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[0].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_poisson_kernel_populate<<<grid, tblock>>>( *(int *)arg0.data, *(int *)arg1.data, arg_idx[0], arg_idx[1], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], x_size, y_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[0].time += t1 - t2; } ops_set_dirtybit_device(args, 6); ops_set_halo_dirtybit3(&args[3], range); ops_set_halo_dirtybit3(&args[4], range); ops_set_halo_dirtybit3(&args[5], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[0].mpi_time += t2 - t1; OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg5); } }
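The host stub above sizes its grid with the usual ceiling division, (x_size - 1) / OPS_block_size_x + 1, and the kernel guards the padded threads with if (idx_x < size0 && idx_y < size1). The standalone sketch below shows just that pattern; it is not OPS code and the fill2d kernel is a placeholder.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void fill2d( double* a, int nx, int ny ) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if ( x < nx && y < ny )                  // same guard as ops_poisson_kernel_populate
        a[y * nx + x] = 1.0;
}

int main() {
    const int nx = 100, ny = 37;             // deliberately not multiples of the block size
    double* d_a = nullptr;
    cudaMalloc( &d_a, nx * ny * sizeof(double) );

    dim3 block( 16, 16 );
    dim3 grid( (nx - 1) / block.x + 1,       // ceil(nx / block.x)
               (ny - 1) / block.y + 1 );     // ceil(ny / block.y)
    fill2d<<<grid, block>>>( d_a, nx, ny );
    cudaDeviceSynchronize();

    printf( "launched %u x %u blocks of %u x %u threads\n",
            grid.x, grid.y, block.x, block.y );
    cudaFree( d_a );
    return 0;
}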
0333cb9caae3fc8745580a399ae91ff76c1a3be2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void clock_block(clock_t *d_o, clock_t clock_count) { unsigned int start_clock = (unsigned int) clock(); clock_t clock_offset = 0; while (clock_offset < clock_count) { unsigned int end_clock = (unsigned int) clock(); // The code below should work like // this (thanks to modular arithmetics): // // clock_offset = (clock_t) (end_clock > start_clock ? // end_clock - start_clock : // end_clock + (0xffffffffu - start_clock)); // // Indeed, let m = 2^32 then // end - start = end + m - start (mod m). clock_offset = (clock_t)(end_clock - start_clock); } d_o[0] = clock_offset; }
0333cb9caae3fc8745580a399ae91ff76c1a3be2.cu
#include "includes.h" __global__ void clock_block(clock_t *d_o, clock_t clock_count) { unsigned int start_clock = (unsigned int) clock(); clock_t clock_offset = 0; while (clock_offset < clock_count) { unsigned int end_clock = (unsigned int) clock(); // The code below should work like // this (thanks to modular arithmetics): // // clock_offset = (clock_t) (end_clock > start_clock ? // end_clock - start_clock : // end_clock + (0xffffffffu - start_clock)); // // Indeed, let m = 2^32 then // end - start = end + m - start (mod m). clock_offset = (clock_t)(end_clock - start_clock); } d_o[0] = clock_offset; }
006adb8de7b6a93b50eb5c7bc9f42d16ed817861.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // @file cov_traceNorm_gpu.cu // @brief fast MPN-COV implementation (GPU) // @author Jiangtao Xie // @author Peihua Li /* Copyright (C) 2018 Peihua Li and Jiangtao Xie All rights reserved. */ #include "nncov_traceNorm_blas.hpp" #include "../data.hpp" #include <math.h> #include <memory> #include <cstdlib> #include <algorithm> #include <limits> #include <cassert> #include "blashelper_gpu.hpp" #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) // We import this Macro function from our Caffe Implementation inline int GET_BLOCKS(const int N) { return (N + VL_CUDA_NUM_THREADS - 1) / VL_CUDA_NUM_THREADS; // We import this function from our Caffe Implementation } template<typename T> __global__ void set_kernel(const ptrdiff_t n, const T alpha, T* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template<typename T> void gpuMemset(const ptrdiff_t n, const T alpha, T* y) { if(alpha == 0){ hipMemset(y, 0, sizeof(T)*n); } hipLaunchKernelGGL(( set_kernel<T>), dim3(GET_BLOCKS(n)),dim3(VL_CUDA_NUM_THREADS), 0, 0, n , alpha, y); } template<typename T> __global__ void init_I_kernel(T* a, T alpha, const ptrdiff_t n) { CUDA_KERNEL_LOOP(index,n){ a[index*(n+1)] = alpha; } } template<typename T> __global__ void traceNormBackward_kernel(T* a, T const* alpha, T* beta, T const* sigma, ptrdiff_t n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) { a[i*(n+1)] = a[i*(n+1)] - beta[0]/(alpha[0] * alpha[0]) + sigma[0]/(2.0f * sqrt(alpha[0])); } } template<typename T> __host__ void traceNormBackward_gpu(T* a, T const* alpha, T* beta, T const* sigma, ptrdiff_t n) { hipLaunchKernelGGL(( traceNormBackward_kernel<T>) , dim3(GET_BLOCKS(n)),dim3(VL_CUDA_NUM_THREADS), 0, 0, a,alpha,beta,sigma,n); } template<typename T> __global__ void copy_kernel(T* a, T* b, ptrdiff_t n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n){ a[i] = b[i]; } } template<typename T> __global__ void scale_kernel(T* a, T const* b, T const* alpha, ptrdiff_t n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n){ a[i] = (1.0f / alpha[0]) * b[i]; } } template<typename T> __global__ void scale2_kernel(T* a, T const* b, T const* alpha, ptrdiff_t n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n){ a[i] = sqrt(alpha[0]) * b[i]; } } template<typename T> __global__ void mul_kernel(T* a, T const* alpha, ptrdiff_t n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n){ a[i] = a[i] / ( 2.0f * alpha[0]); } } namespace vl { namespace impl { template<typename T,vl::DataType dataType> struct cov_traceNorm<vl::VLDT_GPU,T,dataType> { static vl::ErrorCode forward(Context& context, T* output, T const* data, T* aux_T, size_t height, size_t width, size_t depth, size_t num) { vl::ErrorCode error; ptrdiff_t m = height,n = width,L = num,d,i; ptrdiff_t dataOffset; ptrdiff_t aux_TOffset; ptrdiff_t outputOffset; unsigned int workspaceSize = (unsigned int)(n*n); T* workspace = (T*)context.getWorkspace(vl::VLDT_GPU , workspaceSize*sizeof(T)); T* I1 = workspace; gpuMemset(n*n, T(0), I1); hipLaunchKernelGGL(( init_I_kernel<T>), dim3(GET_BLOCKS(n)),dim3(VL_CUDA_NUM_THREADS), 0, 0, I1,(T)1,n); for(d = 0;d < L; d++){ // Trace Norm aux_TOffset = d; outputOffset = d*n*n; dataOffset = d*n*n; error = vl::impl::blas<vl::VLDT_GPU, dataType>::dot(context, n*n, data + dataOffset,ptrdiff_t(1), I1,ptrdiff_t(1), aux_T + aux_TOffset); if(error != vl::VLE_Success) {goto done ;} } for(d = 
0;d < L; d++){ aux_TOffset = d; outputOffset = d*n*n; dataOffset = d*n*n; hipLaunchKernelGGL(( scale_kernel<T>), dim3(GET_BLOCKS(n*n)),dim3(VL_CUDA_NUM_THREADS), 0, 0, output + outputOffset,data + dataOffset,aux_T + aux_TOffset,n*n); } done: return context.passError(error, __func__); } static vl::ErrorCode backward(Context& context, T* derData, T const* data, T const* derOutput, T const* derOutput_aux, T const* aux_T, size_t height, size_t width, size_t depth, size_t num) { vl::ErrorCode error; ptrdiff_t m = height,n = width,L = num,d; ptrdiff_t derDataOffset,aux_TOffset,derOutputOffset; ptrdiff_t dataOffset,derOutput_auxOffset,P_dot_dLdPOffset; unsigned int workspaceSize = (unsigned int)(L); T* workspace = (T*)context.getWorkspace(vl::VLDT_GPU , workspaceSize*sizeof(T)); T* P_dot_dLdP = workspace; /*debug mxArray *x; mxClassID classID = mxSINGLE_CLASS ; mwSize dim[2]; dim[0] = n;dim[1] = n; x = mxCreateNumericArray(2,dim,classID,mxREAL); T *test = (T*)mxGetData(x); memcpy(test,YZ,sizeof(T)*n*n); mexCallMATLAB(0,NULL,1,&x,"Watch"); */ for(d = 0;d < L;d++){ dataOffset = d*m*n; derDataOffset = d*m*n; derOutputOffset = d*m*n; derOutput_auxOffset = d; aux_TOffset = d; P_dot_dLdPOffset = d; error = vl::impl::blas<vl::VLDT_GPU, dataType>::dot(context, n*n, data + dataOffset,ptrdiff_t(1), derOutput + derOutputOffset,ptrdiff_t(1), P_dot_dLdP + P_dot_dLdPOffset); if(error != vl::VLE_Success) {goto done ;} } for(d = 0;d < L; d++){ dataOffset = d*m*n; derDataOffset = d*m*n; derOutputOffset = d*m*n; derOutput_auxOffset = d; aux_TOffset = d; P_dot_dLdPOffset = d; hipLaunchKernelGGL(( scale_kernel<T>), dim3(GET_BLOCKS(n*n)),dim3(VL_CUDA_NUM_THREADS), 0, 0, derData + derDataOffset,derOutput + derOutputOffset,aux_T + aux_TOffset,n*n); traceNormBackward_gpu(derData + derDataOffset,aux_T + aux_TOffset,P_dot_dLdP + P_dot_dLdPOffset,derOutput_aux + derOutput_auxOffset, n); } done: return context.passError(error, __func__); } static vl::ErrorCode forward_aux(Context& context, T* output, T const* data, T* aux_T, size_t height, size_t width, size_t depth, size_t num) { vl::ErrorCode error; ptrdiff_t length = depth,L = num,d; ptrdiff_t outputOffset,aux_TOffset,dataOffset; for(d = 0; d < L; d++){ outputOffset = d*length; dataOffset = d*length; aux_TOffset = d; /*error = vl::impl::blas<vl::VLDT_CPU, dataType>::scal(context, length, alpha, output + OutputOffset,ptrdiff_t(1)); if(error != vl::VLE_Success) {goto done ;}*/ hipLaunchKernelGGL(( scale2_kernel<T>), dim3(GET_BLOCKS(length)),dim3(VL_CUDA_NUM_THREADS), 0, 0, output + outputOffset,data + dataOffset,aux_T + aux_TOffset,length); error = vl::VLE_Success; } done: return context.passError(error, __func__); } static vl::ErrorCode backward_aux(Context& context, T* derData, T* derData_aux, T const* data, T const* derOutput, T const* aux_T, size_t height, size_t width, size_t depth, size_t num) { vl::ErrorCode error; ptrdiff_t length = depth,L = num,d; ptrdiff_t dataOffset,aux_TOffset; ptrdiff_t derDataOffset,derData_auxOffset; ptrdiff_t derOutputOffset; for(d = 0; d < L; d++){ derDataOffset = d*length; derOutputOffset = d*length; derData_auxOffset = d; dataOffset = d*length; aux_TOffset= d; error = vl::impl::blas<vl::VLDT_GPU, dataType>::dot(context, length, data + dataOffset,ptrdiff_t(1), derOutput + derOutputOffset,ptrdiff_t(1), derData_aux + derData_auxOffset); if(error != vl::VLE_Success) {goto done ;} } for(d = 0; d < L; d++){ derDataOffset = d*length; derOutputOffset = d*length; derData_auxOffset = d; dataOffset = d*length; aux_TOffset= d; hipLaunchKernelGGL(( 
scale2_kernel<T>), dim3(GET_BLOCKS(length)),dim3(VL_CUDA_NUM_THREADS), 0, 0, derData + derDataOffset,derOutput + derOutputOffset,aux_T + aux_TOffset,length); } done: return context.passError(error, __func__); } }; } } template struct vl::impl::cov_traceNorm<vl::VLDT_GPU, float,vl::VLDT_Float> ; #ifdef ENABLE_DOUBLE template struct vl::impl::cov_traceNorm<vl::VLDT_GPU, double, vl::VLDT_Double> ; #endif
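// A CPU sketch of what cov_traceNorm::forward above computes per batch element,
// included only as a readability aid (float chosen for concreteness; this is not a
// MatConvNet routine): aux_T[d] = tr(A_d), output_d = A_d / tr(A_d).
static void cov_traceNorm_forward_cpu(float *output, const float *data, float *aux_T,
                                      int n, int L) {
  for (int d = 0; d < L; ++d) {
    const float *A = data + (size_t)d * n * n;
    float tr = 0.0f;
    for (int i = 0; i < n; ++i) tr += A[i * n + i];        // dot(A, I) == trace(A)
    aux_T[d] = tr;
    for (int i = 0; i < n * n; ++i)                         // same scaling as scale_kernel
      output[(size_t)d * n * n + i] = A[i] / tr;
  }
}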
006adb8de7b6a93b50eb5c7bc9f42d16ed817861.cu
// @file cov_traceNorm_gpu.cu // @brief fast MPN-COV implementation (GPU) // @author Jiangtao Xie // @author Peihua Li /* Copyright (C) 2018 Peihua Li and Jiangtao Xie All rights reserved. */ #include "nncov_traceNorm_blas.hpp" #include "../data.hpp" #include <math.h> #include <memory> #include <cstdlib> #include <algorithm> #include <limits> #include <cassert> #include "blashelper_gpu.hpp" #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) // We import this Macro function from our Caffe Implementation inline int GET_BLOCKS(const int N) { return (N + VL_CUDA_NUM_THREADS - 1) / VL_CUDA_NUM_THREADS; // We import this function from our Caffe Implementation } template<typename T> __global__ void set_kernel(const ptrdiff_t n, const T alpha, T* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template<typename T> void gpuMemset(const ptrdiff_t n, const T alpha, T* y) { if(alpha == 0){ cudaMemset(y, 0, sizeof(T)*n); } set_kernel<T><<<GET_BLOCKS(n),VL_CUDA_NUM_THREADS>>>(n , alpha, y); } template<typename T> __global__ void init_I_kernel(T* a, T alpha, const ptrdiff_t n) { CUDA_KERNEL_LOOP(index,n){ a[index*(n+1)] = alpha; } } template<typename T> __global__ void traceNormBackward_kernel(T* a, T const* alpha, T* beta, T const* sigma, ptrdiff_t n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) { a[i*(n+1)] = a[i*(n+1)] - beta[0]/(alpha[0] * alpha[0]) + sigma[0]/(2.0f * sqrt(alpha[0])); } } template<typename T> __host__ void traceNormBackward_gpu(T* a, T const* alpha, T* beta, T const* sigma, ptrdiff_t n) { traceNormBackward_kernel<T> <<<GET_BLOCKS(n),VL_CUDA_NUM_THREADS>>>(a,alpha,beta,sigma,n); } template<typename T> __global__ void copy_kernel(T* a, T* b, ptrdiff_t n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n){ a[i] = b[i]; } } template<typename T> __global__ void scale_kernel(T* a, T const* b, T const* alpha, ptrdiff_t n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n){ a[i] = (1.0f / alpha[0]) * b[i]; } } template<typename T> __global__ void scale2_kernel(T* a, T const* b, T const* alpha, ptrdiff_t n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n){ a[i] = sqrt(alpha[0]) * b[i]; } } template<typename T> __global__ void mul_kernel(T* a, T const* alpha, ptrdiff_t n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n){ a[i] = a[i] / ( 2.0f * alpha[0]); } } namespace vl { namespace impl { template<typename T,vl::DataType dataType> struct cov_traceNorm<vl::VLDT_GPU,T,dataType> { static vl::ErrorCode forward(Context& context, T* output, T const* data, T* aux_T, size_t height, size_t width, size_t depth, size_t num) { vl::ErrorCode error; ptrdiff_t m = height,n = width,L = num,d,i; ptrdiff_t dataOffset; ptrdiff_t aux_TOffset; ptrdiff_t outputOffset; unsigned int workspaceSize = (unsigned int)(n*n); T* workspace = (T*)context.getWorkspace(vl::VLDT_GPU , workspaceSize*sizeof(T)); T* I1 = workspace; gpuMemset(n*n, T(0), I1); init_I_kernel<T><<<GET_BLOCKS(n),VL_CUDA_NUM_THREADS>>>(I1,(T)1,n); for(d = 0;d < L; d++){ // Trace Norm aux_TOffset = d; outputOffset = d*n*n; dataOffset = d*n*n; error = vl::impl::blas<vl::VLDT_GPU, dataType>::dot(context, n*n, data + dataOffset,ptrdiff_t(1), I1,ptrdiff_t(1), aux_T + aux_TOffset); if(error != vl::VLE_Success) {goto done ;} } for(d = 0;d < L; d++){ aux_TOffset = d; outputOffset = d*n*n; dataOffset = d*n*n; scale_kernel<T><<<GET_BLOCKS(n*n),VL_CUDA_NUM_THREADS>>>(output + outputOffset,data + dataOffset,aux_T + aux_TOffset,n*n); } 
done: return context.passError(error, __func__); } static vl::ErrorCode backward(Context& context, T* derData, T const* data, T const* derOutput, T const* derOutput_aux, T const* aux_T, size_t height, size_t width, size_t depth, size_t num) { vl::ErrorCode error; ptrdiff_t m = height,n = width,L = num,d; ptrdiff_t derDataOffset,aux_TOffset,derOutputOffset; ptrdiff_t dataOffset,derOutput_auxOffset,P_dot_dLdPOffset; unsigned int workspaceSize = (unsigned int)(L); T* workspace = (T*)context.getWorkspace(vl::VLDT_GPU , workspaceSize*sizeof(T)); T* P_dot_dLdP = workspace; /*debug mxArray *x; mxClassID classID = mxSINGLE_CLASS ; mwSize dim[2]; dim[0] = n;dim[1] = n; x = mxCreateNumericArray(2,dim,classID,mxREAL); T *test = (T*)mxGetData(x); memcpy(test,YZ,sizeof(T)*n*n); mexCallMATLAB(0,NULL,1,&x,"Watch"); */ for(d = 0;d < L;d++){ dataOffset = d*m*n; derDataOffset = d*m*n; derOutputOffset = d*m*n; derOutput_auxOffset = d; aux_TOffset = d; P_dot_dLdPOffset = d; error = vl::impl::blas<vl::VLDT_GPU, dataType>::dot(context, n*n, data + dataOffset,ptrdiff_t(1), derOutput + derOutputOffset,ptrdiff_t(1), P_dot_dLdP + P_dot_dLdPOffset); if(error != vl::VLE_Success) {goto done ;} } for(d = 0;d < L; d++){ dataOffset = d*m*n; derDataOffset = d*m*n; derOutputOffset = d*m*n; derOutput_auxOffset = d; aux_TOffset = d; P_dot_dLdPOffset = d; scale_kernel<T><<<GET_BLOCKS(n*n),VL_CUDA_NUM_THREADS>>>(derData + derDataOffset,derOutput + derOutputOffset,aux_T + aux_TOffset,n*n); traceNormBackward_gpu(derData + derDataOffset,aux_T + aux_TOffset,P_dot_dLdP + P_dot_dLdPOffset,derOutput_aux + derOutput_auxOffset, n); } done: return context.passError(error, __func__); } static vl::ErrorCode forward_aux(Context& context, T* output, T const* data, T* aux_T, size_t height, size_t width, size_t depth, size_t num) { vl::ErrorCode error; ptrdiff_t length = depth,L = num,d; ptrdiff_t outputOffset,aux_TOffset,dataOffset; for(d = 0; d < L; d++){ outputOffset = d*length; dataOffset = d*length; aux_TOffset = d; /*error = vl::impl::blas<vl::VLDT_CPU, dataType>::scal(context, length, alpha, output + OutputOffset,ptrdiff_t(1)); if(error != vl::VLE_Success) {goto done ;}*/ scale2_kernel<T><<<GET_BLOCKS(length),VL_CUDA_NUM_THREADS>>>(output + outputOffset,data + dataOffset,aux_T + aux_TOffset,length); error = vl::VLE_Success; } done: return context.passError(error, __func__); } static vl::ErrorCode backward_aux(Context& context, T* derData, T* derData_aux, T const* data, T const* derOutput, T const* aux_T, size_t height, size_t width, size_t depth, size_t num) { vl::ErrorCode error; ptrdiff_t length = depth,L = num,d; ptrdiff_t dataOffset,aux_TOffset; ptrdiff_t derDataOffset,derData_auxOffset; ptrdiff_t derOutputOffset; for(d = 0; d < L; d++){ derDataOffset = d*length; derOutputOffset = d*length; derData_auxOffset = d; dataOffset = d*length; aux_TOffset= d; error = vl::impl::blas<vl::VLDT_GPU, dataType>::dot(context, length, data + dataOffset,ptrdiff_t(1), derOutput + derOutputOffset,ptrdiff_t(1), derData_aux + derData_auxOffset); if(error != vl::VLE_Success) {goto done ;} } for(d = 0; d < L; d++){ derDataOffset = d*length; derOutputOffset = d*length; derData_auxOffset = d; dataOffset = d*length; aux_TOffset= d; scale2_kernel<T><<<GET_BLOCKS(length),VL_CUDA_NUM_THREADS>>>(derData + derDataOffset,derOutput + derOutputOffset,aux_T + aux_TOffset,length); } done: return context.passError(error, __func__); } }; } } template struct vl::impl::cov_traceNorm<vl::VLDT_GPU, float,vl::VLDT_Float> ; #ifdef ENABLE_DOUBLE template struct 
vl::impl::cov_traceNorm<vl::VLDT_GPU, double, vl::VLDT_Double> ; #endif
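// The substantive difference between the .hip and .cu files above is the kernel-launch
// syntax. A self-contained toy kernel sketching the two forms side by side; the kernel,
// launch_axpy, and kThreads are illustrative choices, not part of either codebase.
constexpr int kThreads = 256;

template <typename T>
__global__ void axpy_kernel(T *y, const T *x, T a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] += a * x[i];
}

template <typename T>
void launch_axpy(T *y, const T *x, T a, int n) {
  int blocks = (n + kThreads - 1) / kThreads;
#ifdef __HIPCC__
  // HIP form emitted by hipify: explicit macro, parentheses around template-ids,
  // dynamic shared-memory size and stream passed as the 4th and 5th arguments.
  hipLaunchKernelGGL((axpy_kernel<T>), dim3(blocks), dim3(kThreads), 0, 0, y, x, a, n);
#else
  // Original CUDA triple-chevron form.
  axpy_kernel<T><<<blocks, kThreads>>>(y, x, a, n);
#endif
}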
ed04f8aeac1997f45f6d99f37bb83ac6eba585d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from sparse-iter/blas/zgeellmv.cu normal z -> s, Tue Feb 9 16:05:40 2016 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 // ELLPACK SpMV kernel //Michael Garland __global__ void sgeellmv_kernel( int num_rows, int num_cols, int num_cols_per_row, float alpha, float * dval, magma_index_t * dcolind, float * dx, float beta, float * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows) { float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_cols_per_row * row + n ]; float val = dval [ num_cols_per_row * row + n ]; if ( val != 0) dot += val * dx[col ]; } dy[ row ] = dot * alpha + beta * dy [ row ]; } } // shifted ELLPACK SpMV kernel //Michael Garland __global__ void sgeellmv_kernel_shift( int num_rows, int num_cols, int num_cols_per_row, float alpha, float lambda, float * dval, magma_index_t * dcolind, float * dx, float beta, int offset, int blocksize, magma_index_t * addrows, float * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows) { float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_cols_per_row * row + n ]; float val = dval [ num_cols_per_row * row + n ]; if ( val != 0) dot += val * dx[col ]; } if ( row < blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. Input format is ELLPACK. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha float scalar multiplier @param[in] dval magmaFloat_ptr array containing values of A in ELLPACK @param[in] dcolind magmaIndex_ptr columnindices of A in ELLPACK @param[in] dx magmaFloat_ptr input vector x @param[in] beta float scalar multiplier @param[out] dy magmaFloat_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_sgeellmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, float alpha, magmaFloat_ptr dval, magmaIndex_ptr dcolind, magmaFloat_ptr dx, float beta, magmaFloat_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( sgeellmv_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU. Input format is ELLPACK. It is the shifted version of the ELLPACK SpMV. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha float scalar multiplier @param[in] lambda float scalar multiplier @param[in] dval magmaFloat_ptr array containing values of A in ELLPACK @param[in] dcolind magmaIndex_ptr columnindices of A in ELLPACK @param[in] dx magmaFloat_ptr input vector x @param[in] beta float scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaFloat_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_sgeellmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, float alpha, float lambda, magmaFloat_ptr dval, magmaIndex_ptr dcolind, magmaFloat_ptr dx, float beta, magma_int_t offset, magma_int_t blocksize, magmaIndex_ptr addrows, magmaFloat_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( sgeellmv_kernel_shift), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, nnz_per_row, alpha, lambda, dval, dcolind, dx, beta, offset, blocksize, addrows, dy ); return MAGMA_SUCCESS; }
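// A small worked example of the padded row-major ELLPACK layout that the kernels above
// index as dval[num_cols_per_row*row + n] / dcolind[num_cols_per_row*row + n]
// (illustrative only; padding slots carry val == 0 and any valid column index, and the
// kernels skip them via the `val != 0` test):
//
//        [10  0  2]                     num_cols_per_row = 2
//    A = [ 0  3  0]    ==>   dval    = {10, 2,   3, 0,   6, 0}
//        [ 0  0  6]          dcolind = { 0, 2,   1, 1,   2, 2}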
ed04f8aeac1997f45f6d99f37bb83ac6eba585d8.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from sparse-iter/blas/zgeellmv.cu normal z -> s, Tue Feb 9 16:05:40 2016 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 // ELLPACK SpMV kernel //Michael Garland __global__ void sgeellmv_kernel( int num_rows, int num_cols, int num_cols_per_row, float alpha, float * dval, magma_index_t * dcolind, float * dx, float beta, float * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows) { float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_cols_per_row * row + n ]; float val = dval [ num_cols_per_row * row + n ]; if ( val != 0) dot += val * dx[col ]; } dy[ row ] = dot * alpha + beta * dy [ row ]; } } // shifted ELLPACK SpMV kernel //Michael Garland __global__ void sgeellmv_kernel_shift( int num_rows, int num_cols, int num_cols_per_row, float alpha, float lambda, float * dval, magma_index_t * dcolind, float * dx, float beta, int offset, int blocksize, magma_index_t * addrows, float * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows) { float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_cols_per_row * row + n ]; float val = dval [ num_cols_per_row * row + n ]; if ( val != 0) dot += val * dx[col ]; } if ( row < blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. Input format is ELLPACK. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha float scalar multiplier @param[in] dval magmaFloat_ptr array containing values of A in ELLPACK @param[in] dcolind magmaIndex_ptr columnindices of A in ELLPACK @param[in] dx magmaFloat_ptr input vector x @param[in] beta float scalar multiplier @param[out] dy magmaFloat_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_sgeellmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, float alpha, magmaFloat_ptr dval, magmaIndex_ptr dcolind, magmaFloat_ptr dx, float beta, magmaFloat_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; sgeellmv_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU. Input format is ELLPACK. It is the shifted version of the ELLPACK SpMV. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha float scalar multiplier @param[in] lambda float scalar multiplier @param[in] dval magmaFloat_ptr array containing values of A in ELLPACK @param[in] dcolind magmaIndex_ptr columnindices of A in ELLPACK @param[in] dx magmaFloat_ptr input vector x @param[in] beta float scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaFloat_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_sgeellmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, float alpha, float lambda, magmaFloat_ptr dval, magmaIndex_ptr dcolind, magmaFloat_ptr dx, float beta, magma_int_t offset, magma_int_t blocksize, magmaIndex_ptr addrows, magmaFloat_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; sgeellmv_kernel_shift<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, nnz_per_row, alpha, lambda, dval, dcolind, dx, beta, offset, blocksize, addrows, dy ); return MAGMA_SUCCESS; }
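// A CPU reference for the (non-shifted) ELLPACK SpMV computed by sgeellmv_kernel above,
// added only as a readability aid; same padded row-major layout, not a MAGMA routine.
static void sgeellmv_cpu(int num_rows, int num_cols_per_row,
                         float alpha, const float *val, const int *colind,
                         const float *x, float beta, float *y) {
  for (int row = 0; row < num_rows; ++row) {
    float dot = 0.0f;
    for (int n = 0; n < num_cols_per_row; ++n) {
      float v = val[num_cols_per_row * row + n];            // padded slots hold 0.0f
      if (v != 0.0f) dot += v * x[colind[num_cols_per_row * row + n]];
    }
    y[row] = alpha * dot + beta * y[row];                   // y = alpha*A*x + beta*y
  }
}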
20ced1aa2e500e9bc08433ceeea379dd4bb0477c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "defs.h" #include "sifts.h" #include "Gridder.h" #include "Matcher.h" #include <thrust/extrema.h> //============================== DESCRIPTION ===================================== // // This code deals with matching sifts between two images // This code implements the algorithm for GPU Matching. It is assumed that // F-matrix is computed from the coarse k% features and remaining features are // matched using this algorithm. We will match SIFT // features in the query/source image to those in the target image. // //================================================================================ //============================ Declare textures ================================== texture <unsigned char, 1, hipReadModeElementType> tex_sourceDescriptor; texture <unsigned char, 1, hipReadModeElementType> tex_targetDescriptor; void Matcher::visualizeMatches(sift_img source,sift_img target,std::vector<std::pair<int,int> > matches,std::vector<SiftGPU::SiftKeypoint> &keys1,std::vector<SiftGPU::SiftKeypoint> &keys2) { const char* saveAs = NULL; cv::Mat queryImage = source.img; int qWidth = source.width; int qHeight = source.height; cv::Mat referenceImage = target.img; int rWidth = target.width; int rHeight = target.height; int canvasWidth = qWidth + rWidth; int canvasHeight = qHeight > rHeight ? qHeight : rHeight; cv::Mat canvas(canvasHeight, canvasWidth, CV_8UC3, cv::Scalar(0,0,0)); cv::Rect roi1 = cv::Rect(0,0,qWidth,qHeight); cv::Mat canvas_roi1 = canvas(roi1); queryImage.copyTo(canvas_roi1); cv::Rect roi2 = cv::Rect(qWidth,0,rWidth,rHeight); cv::Mat canvas_roi2 = canvas(roi2); referenceImage.copyTo(canvas_roi2); for(int i=0; i < matches.size(); i++) { std::pair<int,int> p = matches[i]; cv::Point pt1 = cv::Point(keys1[p.first].x, keys1[p.first].y); cv::Point pt2 = cv::Point(keys2[p.second].x + qWidth, keys2[p.second].y); cv::circle(canvas, pt1, 2, cv::Scalar(0,255,0), 4); cv::circle(canvas, pt2, 2, cv::Scalar(0,255,0), 4); cv::line(canvas, pt1, pt2, cv::Scalar(0,255,0), 4); } cv::namedWindow("FeatureMatches",cv::WINDOW_NORMAL); imshow("FeatureMatches", canvas); cv::waitKey(); if(saveAs != NULL) { imwrite( saveAs, canvas ); } } struct cast { __host__ __device__ unsigned char operator()(float x) { return ((unsigned char)(512*x)); } }; __global__ void computeGridID(SiftGPU::SiftKeypoint *keys, int *gridID1,int *gridID2,int *gridID3,int *gridID4, int num_keys,int gridSize,int numXGrids1,int halfSize,int numXGrids2,int numGrids,int numGridsXOv,int numGridsYOv) { //=========== Function to compute gridID given siftID =========== // Find data location to be processed by current thread int siftID = blockIdx.x*blockDim.x + threadIdx.x; if(siftID < num_keys) { double simpleX = ((double)keys[siftID].x/(double)gridSize); double simpleY = ((double)keys[siftID].y/(double)gridSize); double ov = (double)halfSize/gridSize; gridID1[siftID] = floor(simpleY)*numXGrids1 + floor(simpleX); gridID2[siftID] = floor(simpleY)*numXGrids2 + floor(simpleX - ov); gridID3[siftID] = floor(simpleY - ov)*numXGrids1 + floor(simpleX); gridID4[siftID] = floor(simpleY -ov)*numXGrids2 + floor(simpleX - ov); if(gridID2[siftID] < 0) gridID2[siftID] = 0; if(gridID3[siftID] < 0) gridID3[siftID] = 0; if(gridID4[siftID] < 0) gridID4[siftID] = 0; // gridID corresponding to the given siftID is computed } __syncthreads(); } __global__ void findTopOfBucket(int *d_gridID,int *d_gridID2,int *d_gridID3,int *d_gridID4, int num_elements,int 
*d_topOfBucket_ptr,int *d_topOfBucket2_ptr,int *d_topOfBucket3_ptr,int *d_topOfBucket4_ptr,int numGrids,int numGridsXOv,int numGridsYOv,int numGridsXYOv) { // Find data location to be processed by current thread int tidx = blockIdx.x*blockDim.x + threadIdx.x; if(tidx < num_elements && tidx != 0) { d_topOfBucket_ptr[d_gridID[0]] = 0; d_topOfBucket2_ptr[d_gridID2[0]] = 0; d_topOfBucket3_ptr[d_gridID3[0]] = 0; d_topOfBucket4_ptr[d_gridID4[0]] = 0; if (d_gridID[tidx] != d_gridID[tidx-1]) d_topOfBucket_ptr[d_gridID[tidx]] = tidx; if (d_gridID2[tidx] != d_gridID2[tidx-1]) d_topOfBucket2_ptr[d_gridID2[tidx]] = tidx; if (d_gridID3[tidx] != d_gridID3[tidx-1]) d_topOfBucket3_ptr[d_gridID3[tidx]] = tidx; if (d_gridID4[tidx] != d_gridID4[tidx-1]) d_topOfBucket4_ptr[d_gridID4[tidx]] = tidx; } __syncthreads(); } __global__ void findNumSift(int *gridID,int *topOfBucket,int *gridID2,int *topOfBucket2,int *gridID3,int *topOfBucket3,int *gridID4,int *topOfBucket4,int numGrids,int numGridsXOv,int numGridsYOv,int numGridsXYOv,int *numSift,int *numSift2,int *numSift3,int *numSift4) { //=========== Find number of target sifts in each grid =========== // Find data location to be processed by current thread int gridid = blockIdx.x*blockDim.x + threadIdx.x; int i,numsift = 0; if(gridid < numGrids) { if(topOfBucket[gridid]!=-1) { for(i = topOfBucket[gridid]; gridID[i] == gridid; i++) numsift++; numSift[gridid] = numsift; } else numSift[gridid] = 0; } if(gridid < numGridsXOv) { if(topOfBucket2[gridid]!=-1) { numsift = 0; for(i = topOfBucket2[gridid]; gridID2[i] == gridid; i++) numsift++; numSift2[gridid] = numsift; } else numSift2[gridid] = 0; } if(gridid < numGridsYOv) { if(topOfBucket3[gridid]!=-1) { numsift = 0; for(i = topOfBucket3[gridid]; gridID3[i] == gridid; i++) numsift++; numSift3[gridid] = numsift; } else numSift3[gridid] = 0; } if(gridid < numGridsXYOv) { if(topOfBucket4[gridid]!=-1) { numsift = 0; for(i = topOfBucket4[gridid]; gridID4[i] == gridid; i++) numsift++; numSift4[gridid] = numsift; } else numSift4[gridid] = 0; } __syncthreads(); } __global__ void findEpipolarLine(float *fmatrix,SiftGPU::SiftKeypoint *keys,int source_width, int source_height,int width,int height,long long int *epipolarPoints,int source_num_keys) { //============ Find Epipolar Line for each source sift ============ int source_siftID = blockIdx.x*blockDim.x + threadIdx.x; if(source_siftID < source_num_keys) { float epipolarLine[3]; short x1,y1,x2,y2; // border points of line segment inside the target image float top_x, right_y, bottom_x, left_y ; // points of intersection with the top , right , bottom and left lines of the target image border rectangle float x,y; // To Euclidian Coordinate system x = keys[source_siftID].x - ((source_width-1)/2); y = ((source_height-1)/2) - keys[source_siftID].y; // E = F.x epipolarLine[0] = (fmatrix[0]*x) + (fmatrix[1]*y) + fmatrix[2]; epipolarLine[1] = (fmatrix[3]*x) + (fmatrix[4]*y) + fmatrix[5]; epipolarLine[2] = (fmatrix[6]*x) + (fmatrix[7]*y) + fmatrix[8]; //===================================================================== // // In Euclidian Coordinate system (centrailized), // line equations for target image rectangle: // --------------------------------------------------------- // y=(h-1)/2 (top) // x=(w-1)/2 (right) // y=-(h-1)/2 (bottom) // x=-(w-1)/2 (left) // // --------------------------------------------------------- // // So points of intersection with epipolar line ( ax + by + c = 0 ) are : // --------------------------------------------------------- // x = (-b(h-1)/2 -c )/a, y 
= (h-1)/2 // x = (w-1)/2, y = (-c-(w-1)a/2)/b // x = (-c+(h-1)b/2)/a , y = -(h-1)/2 // x = -(w-1)/2 , y =( -c+(w-1)a/2)/b // //===================================================================== //================== Find points of intersection ====================== top_x = ((-1*epipolarLine[1]*(height-1)/2)-(1*epipolarLine[2]))/epipolarLine[0] ; right_y =((-1*epipolarLine[2])-((width-1)*epipolarLine[0]/2))/epipolarLine[1]; bottom_x =((-1*epipolarLine[2])+((height-1)*epipolarLine[1]/2))/epipolarLine[0]; left_y =((-1*epipolarLine[2])+((width-1)*epipolarLine[0]/2))/epipolarLine[1]; //===================================================================== // Now these points (top , bottom ,left, right) are in Euclidian Coordinate // system ((0,0) at the centre of the image) // Back to graphics coordinate system ((0,0) at the top left corner of // image and downwards is +ve y) top_x = top_x + ((width-1)/2); bottom_x = bottom_x + ((width-1)/2); right_y = ((height-1)/2) - right_y; left_y = ((height-1)/2) - left_y; //===================================================================== //===================================================================== // // -> Now only two of these points will lie on the target image border rectangle // -> They are (x1,y1) and (x2,y2) // -> Note : x1 < x2 // //===================================================================== x1 = x2 = y1 = y2 = 0; //====================== Find x1, x2, y1, y2 ========================== if(left_y >= 0 && left_y <= height-1) { if(top_x >= 0 && top_x <= width-1) { x1 = 0; y1 = (short)left_y; x2 = (short)top_x; y2 = 0; } else if(bottom_x >= 0 && bottom_x <= width-1) { x1 = 0; y1 = (short)left_y; x2 = (short)bottom_x; y2 = height-1; } else if(right_y >= 0 && right_y <= height-1) { x1 = 0; y1 = (short)left_y; x2 = width-1; y2 = (short)right_y; } } else if(top_x >= 0 && top_x <= width-1) { if(right_y >= 0 && right_y <= height-1) { x1 = (short)top_x; y1 = 0; x2 = width-1; y2 = (short)right_y; } else if(bottom_x >= 0 && bottom_x <= width-1) { if(top_x < bottom_x) { x1 = (short)top_x; y1 = 0; x2 = (short)bottom_x; y2 = height-1; } else { x2 = (short)top_x; y2 = height-1; x1 = (short)bottom_x; y1 = 0; } } } else if(bottom_x >= 0 && bottom_x <= width-1) { if(right_y >= 0 && right_y <= height-1) { x1 = (short)bottom_x; y1 = height-1; x2 = width-1; y2 = (short)right_y; } } //===================================================================== //======= Packing all these points in a single long long int ========== long long int l = 0; l = l | (long long int)x1 << 48; l = l | (long long int)y1 << 32; l = l | (long long int)x2 << 16; l = l | (long long int)y2 << 0; epipolarPoints[source_siftID] = l; //===================================================================== } __syncthreads(); // Done ! } __global__ void clusterLines(long long int *d_epipolarPoints,int *cluster,int source_num_keys) { //===================================================================== // // d_epipolarPoints is a sorted array of packed points through which // we extract the end points of an epipolar line. If there is a // significant difference between two adjacent end points (say previous // and current), we mark the current one as 1 else 0 and store it in // cluster[current]. By default cluster[0] = 0. 
In this way all the // points between 1 to the next 1 fall in one group // //===================================================================== int id = blockIdx.x*blockDim.x + threadIdx.x; // id = 0 : first entry handled on the cpu cluster[0] = 0 if( id < source_num_keys && id > 0) { unsigned long long int s = 0,l; short x1,y1,x2,y2; short prev_x1,prev_y1,prev_x2,prev_y2; s = ~s; s = s >> 48; // Extract the points from the packed point l = d_epipolarPoints[id]; x1 = (short)(s & (l >> 48)); y1 = (short)(s & (l >> 32)); x2 = (short)(s & (l >> 16)); y2 = (short)(s & (l)); // Extract the points from the packed point l = d_epipolarPoints[id-1]; prev_x1 = (short)(s & (l >> 48)); prev_y1 = (short)(s & (l >> 32)); prev_x2 = (short)(s & (l >> 16)); prev_y2 = (short)(s & (l)); if(((x1-prev_x1)*(x1-prev_x1) < 4) && ((y1-prev_y1)*(y1-prev_y1)< 4) && ((x2-prev_x2)*(x2-prev_x2) < 4) && ((y2-prev_y2)*(y2-prev_y2) < 4)) // If no significant difference between current and previous { cluster[id] = 0; } else { cluster[id] = 1; } } __syncthreads(); // Done ! } __global__ void findClusterLocation(int *cluster, int source_num_keys,int *clusterLocation,int *source_siftID,int *clusterID) { //=================================================================== // // Current scene example // +-----------+---+---+---+---+---+---+---+---+----- // | tidx | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | ... // +-----------+---+---+---+---+---+---+---+---+----- // | siftid | a | b | c | d | e | f | g | h |... // +-----------+---+---+---+---+---+---+---+---+----- // | cluster | 0 | 1 | 1 | 2 | 3 | 3 | 3 | 4 |... // +-----------+---+---+---+---+---+---+---+---+----- // // So we want cluster location to be like this so that it // is easy to find out members of one cluster // +------------------+---+---+---+---+---+-------- // | cluster_id | 0 | 1 | 2 | 3 | 4 | ... // +------------------+---+---+---+---+---+-------- // | cluster_location | 0 | 1 | 3 | 4 | 7 | ... 
// +------------------+---+---+---+---+---+-------- // //=================================================================== // tidx = 0 handled on the CPU int tidx = blockIdx.x*blockDim.x + threadIdx.x; if(tidx < source_num_keys) { if( tidx != 0) { if (cluster[tidx] != cluster[tidx-1]) clusterLocation[cluster[tidx]] = tidx; } clusterID[source_siftID[tidx]] = cluster[tidx]; } __syncthreads(); } __device__ void getBestGrid(float x, float y, int gridSize,int halfSize,int numXGrids1,int numXGrids2,int numGridsXOv,int numGridsYOv,int *gridid, int *which,int numGrids) { int idx[4] = {0,0,0,0}; float dists[4] = {0,0,0,0}; double simpleX = ((double)x/gridSize); double simpleY = ((double)y/gridSize); double ov = (double)halfSize/gridSize; idx[0] = floor(simpleY)*numXGrids1 + floor(simpleX); idx[1] = floor(simpleY)*numXGrids2 + floor(simpleX - ov); idx[2] = floor(simpleY - ov)*numXGrids1 + floor(simpleX); idx[3] = floor(simpleY -ov)*numXGrids2 + floor(simpleX - ov); if(idx[1] < 0) idx[1] = 0; if(idx[2] < 0) idx[2] = 0; if(idx[3] < 0) idx[3] = 0; float g1Xc = (floor(simpleX))*gridSize + (float)halfSize; float g1Yc = (floor(simpleY))*gridSize + (float)halfSize; float g2Xc = (floor(simpleX - ov))*gridSize +(float) 2*halfSize; //float g2Yc = g1Yc; //float g3Xc = g1Xc; float g3Yc = (floor(simpleY - ov))*gridSize + (float)2*halfSize; //float g4Xc = g2Xc; //float g4Yc = g3Yc; float x1_d = (g1Xc - x)*(g1Xc - x); float y1_d = (g1Yc - y)*(g1Yc - y); float x2_d = (g2Xc - x)*(g2Xc - x); float y2_d = (g3Yc - y)*(g3Yc - y); dists[0] = x1_d + y1_d; dists[1] = x2_d + y1_d; dists[2] = x1_d + y2_d; dists[3] = x2_d + y2_d; float min_g_dist = 200000; int minIdx = -1; int idno = 0; for(int id=0; id < 4; id++) { if(dists[id] < min_g_dist && idx[id] >= 0) { idno = id+1; min_g_dist = dists[id]; minIdx = idx[id]; } } *gridid = minIdx; *which = idno; // *gridid = idx[0]; // *which = 1; } __global__ void findNumPotentialMatchesForEachCluster(int *gridID,int *target_siftID,int *topOfBucket,int *gridID2,int *target_siftID2,int *topOfBucket2,int *gridID3,int *target_siftID3,int *topOfBucket3,int *gridID4,int *target_siftID4,int *topOfBucket4,int gridSize,int *clusterLocation,long long int *epipolarPoints,int *numSift,int *numSift2,int *numSift3,int *numSift4,int target_height,int numXGrids1,int halfSize,int numXGrids2,int numGridsXOv,int numGridsYOv,int numGrids,int numGridsXYOv,int *numPotentialMatches) { //===================================================================== // // For each cluster, load the epipolar line and mark eqidistant points // on that line. Now find the respective grids of these points. // After knowing which all grids are a part of the epipolar line, The // potential matches are just the target sifts in those grids. 
// //===================================================================== int clusterID = blockIdx.x; int threadID = threadIdx.x; int clusterlocation = clusterLocation[clusterID]; //potentialMatches[maxNumPotentialMatches*clusterID] = 0; extern __shared__ int shared_array[]; int i,j,k; int distance, d, num_of_points, num_of_pts_per_thread; float cos_theta,sin_theta,x,y,slope; unsigned long long int s = 0,l; short x1,y1,x2,y2; s = ~s; s = s >> 48; l = epipolarPoints[clusterlocation]; // Extract the points from the packed point l x1 = (short)(s & (l >> 48)); y1 = (short)(s & (l >> 32)); x2 = (short)(s & (l >> 16)); y2 = (short)(s & (l)); distance = (int) sqrt((float)((x1-x2)*(x1-x2)) + ((y1-y2)*(y1-y2))); d = gridSize ; // equidistant points at d distance num_of_points = ceil((float)distance/gridSize); if(distance > 5000) { num_of_points = (5000/gridSize); d = distance/num_of_points; } num_of_pts_per_thread = ceil((float)num_of_points/blockDim.x); if((x2-x1) != 0) // slope is not infinity { slope = (float)(y2-y1)/(x2-x1); cos_theta = 1/sqrt(1+(slope*slope)); sin_theta = slope/sqrt(1+(slope*slope)); } else // slope is infinite : vertical epipolar line { cos_theta = 0; sin_theta = 1; } // Compute gridIDs for num_of_pts_per_thread //========================================================================== // // Size of the following array = 10 // Reason : I assume that num_of_pts_per_thread can be 10 at max // if so then num_of_pts = 256(block_size)x10 = 2560 // assumption : min gridSize = 2 (4 actually :P ) // so length = 2x2560 = 5120 (we have already assumed max length to be 5000) // therefore no problem with this assumption :D // //========================================================================== int totalNumsift,top; int grids[10] = {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}; int which[10] = {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}; //int gridx,gridy,grid_id; int grid_id; totalNumsift = 0; top = -1; int which_grid = 0; for(i = 0 ; i < num_of_pts_per_thread ; i++ ) { // threadID*num_of_pts_per_thread th point x = (float)(x1+(((threadID*num_of_pts_per_thread)+i)*cos_theta*d)); y = (float)(y1+(((threadID*num_of_pts_per_thread)+i)*sin_theta*d)); if(top == 10) break; if( x >= (float)x1 && x <= (float)x2 && y <= (float)target_height-1 && y >= 0.0) // the point lies in the rectangle { // get best grid id // Compute gridID of the point getBestGrid(x, y,gridSize, halfSize,numXGrids1, numXGrids2, numGridsXOv,numGridsYOv,&grid_id,&which_grid,numGrids); if(top == -1) { if(which_grid == 1) { totalNumsift += numSift[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 2) { totalNumsift += numSift2[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 3) { totalNumsift += numSift3[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 4) { totalNumsift += numSift4[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } } else if(grid_id!=grids[top]) { if(which_grid == 1) { totalNumsift += numSift[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 2) { totalNumsift += numSift2[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 3) { totalNumsift += numSift3[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 4) { totalNumsift += numSift4[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } } } } shared_array[threadID] = totalNumsift; __syncthreads(); 
//========================================================================= // Find the number of target sifts each thread has to process // and write it into shared memory // to make sure that all the threads have written their num of sifts in the shared memory // Now we need to compute the prefix sum of this shared array which has 256 elements as of now // to know the position to write in the global memory for each thread //=========================== Prefix Sum ==================================== //================================ Upsweep ================================== if(threadID < 128) shared_array[256+threadID] = shared_array[2*threadID]+shared_array[(2*threadID)+1]; __syncthreads(); if(threadID < 64) shared_array[256+128+threadID] = shared_array[256+(2*threadID)]+shared_array[256+(2*threadID)+1]; __syncthreads(); if(threadID < 32) shared_array[256+128+64+threadID] = shared_array[256+128+(2*threadID)]+shared_array[256+128+(2*threadID)+1]; __syncthreads(); if(threadID < 16) shared_array[256+128+64+32+threadID] = shared_array[256+128+64+(2*threadID)]+shared_array[256+128+64+(2*threadID)+1]; __syncthreads(); if(threadID < 8) shared_array[256+128+64+32+16+threadID] = shared_array[256+128+64+32+(2*threadID)]+shared_array[256+128+64+32+(2*threadID)+1]; __syncthreads(); if(threadID < 4) shared_array[256+128+64+32+16+8+threadID] = shared_array[256+128+64+32+16+(2*threadID)]+shared_array[256+128+64+32+16+(2*threadID)+1]; __syncthreads(); if(threadID < 2) shared_array[256+128+64+32+16+8+4+threadID] = shared_array[256+128+64+32+16+8+(2*threadID)]+shared_array[256+128+64+32+16+8+(2*threadID)+1]; __syncthreads(); if(threadID < 1) shared_array[256+128+64+32+16+8+4+2+threadID] = shared_array[256+128+64+32+16+8+4+(2*threadID)]+shared_array[256+128+64+32+16+8+4+(2*threadID)+1]; __syncthreads(); //======================== Downsweep ============================== if(threadID < 2 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+16+8+4+threadID] = shared_array[256+128+64+32+16+8+4+2+(threadID/2)]; else shared_array[256+128+64+32+16+8+4+threadID] += shared_array[256+128+64+32+16+8+4+2+(threadID/2)-1]; } __syncthreads(); if(threadID < 4 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+16+8+threadID] = shared_array[256+128+64+32+16+8+4+(threadID/2)]; else shared_array[256+128+64+32+16+8+threadID] += shared_array[256+128+64+32+16+8+4+(threadID/2)-1]; } __syncthreads(); if(threadID < 8 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+16+threadID] = shared_array[256+128+64+32+16+8+(threadID/2)]; else shared_array[256+128+64+32+16+threadID] += shared_array[256+128+64+32+16+8+(threadID/2)-1]; } __syncthreads(); if(threadID < 16 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+threadID] = shared_array[256+128+64+32+16+(threadID/2)]; else shared_array[256+128+64+32+threadID] += shared_array[256+128+64+32+16+(threadID/2)-1]; } __syncthreads(); if(threadID < 32 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+threadID] = shared_array[256+128+64+32+(threadID/2)]; else shared_array[256+128+64+threadID] += shared_array[256+128+64+32+(threadID/2)-1]; } __syncthreads(); if(threadID < 64 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+threadID] = shared_array[256+128+64+(threadID/2)]; else shared_array[256+128+threadID] += shared_array[256+128+64+(threadID/2)-1]; } __syncthreads(); if(threadID < 128 && threadID!=0) { if(threadID%2 != 0) shared_array[256+threadID] = shared_array[256+128+(threadID/2)]; else 
shared_array[256+threadID] += shared_array[256+128+(threadID/2)-1]; } __syncthreads(); if(threadID < 256 && threadID!=0) { if(threadID%2 != 0) shared_array[threadID] = shared_array[256+(threadID/2)]; else shared_array[threadID] += shared_array[256+(threadID/2)-1]; } __syncthreads(); //========================================================================== // // Now shared array[0..255] contains prefix sum // There is an array of size maxNumPotentialMatches for each cluster, we will write the potential matches there // The 1st entry would be the total number of potential matches and // then each thread will write the matching sift ids it found at write position // //========================================================================== if(threadID == 0) { // printf("%d \n",shared_array[255]); numPotentialMatches[clusterID] = shared_array[255]; } __syncthreads(); } __global__ void findPotentialMatchesForEachCluster(int *gridID,int *target_siftID,int *topOfBucket,int *gridID2,int *target_siftID2,int *topOfBucket2,int *gridID3,int *target_siftID3,int *topOfBucket3,int *gridID4,int *target_siftID4,int *topOfBucket4,int gridSize,int *clusterLocation,long long int *epipolarPoints,int *numSift,int *numSift2,int *numSift3,int *numSift4,int *potentialMatches,int target_height,int numXGrids1,int halfSize,int numXGrids2,int numGridsXOv,int numGridsYOv,int numGrids,int numGridsXYOv,int maxNumPotentialMatches) { //===================================================================== // // For each cluster, load the epipolar line and mark eqidistant points // on that line. Now find the respective grids of these points. // After knowing which all grids are a part of the epipolar line, The // potential matches are just the target sifts in those grids. // //===================================================================== int clusterID = blockIdx.x; int threadID = threadIdx.x; int clusterlocation = clusterLocation[clusterID]; potentialMatches[(2+maxNumPotentialMatches)*clusterID] = 0; extern __shared__ int shared_array[]; int i,j,k; int distance, d, num_of_points, num_of_pts_per_thread; float cos_theta,sin_theta,x,y,slope; unsigned long long int s = 0,l; short x1,y1,x2,y2; s = ~s; s = s >> 48; l = epipolarPoints[clusterlocation]; // Extract the points from the packed point l x1 = (short)(s & (l >> 48)); y1 = (short)(s & (l >> 32)); x2 = (short)(s & (l >> 16)); y2 = (short)(s & (l)); distance = (int) sqrt((float)((x1-x2)*(x1-x2)) + ((y1-y2)*(y1-y2))); d = gridSize ; // equidistant points at d distance num_of_points = ceil((float)distance/gridSize); if(distance > 5000) { num_of_points = (5000/gridSize); d = distance/num_of_points; } num_of_pts_per_thread = ceil((float)num_of_points/blockDim.x); if((x2-x1) != 0) // slope is not infinity { slope = (float)(y2-y1)/(x2-x1); cos_theta = 1/sqrt(1+(slope*slope)); sin_theta = slope/sqrt(1+(slope*slope)); } else // slope is infinite : vertical epipolar line { cos_theta = 0; sin_theta = 1; } // Compute gridIDs for num_of_pts_per_thread //========================================================================== // // Size of the following array = 10 // Reason : I assume that num_of_pts_per_thread can be 10 at max // if so then num_of_pts = 256(block_size)x10 = 2560 // assumption : min gridSize = 2 (4 actually :P ) // so length = 2x2560 = 5120 (we have already assumed max length to be 5000) // therefore no problem with this assumption :D // //========================================================================== int totalNumsift,top; int grids[10] = 
{-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}; int which[10] = {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}; //int gridx,gridy,grid_id; int grid_id; totalNumsift = 0; top = -1; int which_grid = 0; for(i = 0 ; i < num_of_pts_per_thread ; i++ ) { // threadID*num_of_pts_per_thread th point x = (float)(x1+(((threadID*num_of_pts_per_thread)+i)*cos_theta*d)); y = (float)(y1+(((threadID*num_of_pts_per_thread)+i)*sin_theta*d)); if(top == 10) break; if( x >= (float)x1 && x <= (float)x2 && y <= (float)target_height-1 && y >= 0.0) // the point lies in the rectangle { // get best grid id // Compute gridID of the point getBestGrid(x, y,gridSize, halfSize,numXGrids1, numXGrids2, numGridsXOv,numGridsYOv,&grid_id,&which_grid,numGrids); if(top == -1) { if(which_grid == 1) { totalNumsift += numSift[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 2) { totalNumsift += numSift2[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 3) { totalNumsift += numSift3[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 4) { totalNumsift += numSift4[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } } else if(grid_id!=grids[top]) { if(which_grid == 1) { totalNumsift += numSift[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 2) { totalNumsift += numSift2[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 3) { totalNumsift += numSift3[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 4) { totalNumsift += numSift4[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } } } } shared_array[threadID] = totalNumsift; __syncthreads(); //========================================================================= // Find the number of target sifts each thread has to process // and write it into shared memory // to make sure that all the threads have written their num of sifts in the shared memory // Now we need to compute the prefix sum of this shared array which has 256 elements as of now // to know the position to write in the global memory for each thread //=========================== Prefix Sum ==================================== //================================ Upsweep ================================== if(threadID < 128) shared_array[256+threadID] = shared_array[2*threadID]+shared_array[(2*threadID)+1]; __syncthreads(); if(threadID < 64) shared_array[256+128+threadID] = shared_array[256+(2*threadID)]+shared_array[256+(2*threadID)+1]; __syncthreads(); if(threadID < 32) shared_array[256+128+64+threadID] = shared_array[256+128+(2*threadID)]+shared_array[256+128+(2*threadID)+1]; __syncthreads(); if(threadID < 16) shared_array[256+128+64+32+threadID] = shared_array[256+128+64+(2*threadID)]+shared_array[256+128+64+(2*threadID)+1]; __syncthreads(); if(threadID < 8) shared_array[256+128+64+32+16+threadID] = shared_array[256+128+64+32+(2*threadID)]+shared_array[256+128+64+32+(2*threadID)+1]; __syncthreads(); if(threadID < 4) shared_array[256+128+64+32+16+8+threadID] = shared_array[256+128+64+32+16+(2*threadID)]+shared_array[256+128+64+32+16+(2*threadID)+1]; __syncthreads(); if(threadID < 2) shared_array[256+128+64+32+16+8+4+threadID] = shared_array[256+128+64+32+16+8+(2*threadID)]+shared_array[256+128+64+32+16+8+(2*threadID)+1]; __syncthreads(); if(threadID < 1) shared_array[256+128+64+32+16+8+4+2+threadID] = shared_array[256+128+64+32+16+8+4+(2*threadID)]+shared_array[256+128+64+32+16+8+4+(2*threadID)+1]; __syncthreads(); //======================== 
Downsweep ============================== if(threadID < 2 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+16+8+4+threadID] = shared_array[256+128+64+32+16+8+4+2+(threadID/2)]; else shared_array[256+128+64+32+16+8+4+threadID] += shared_array[256+128+64+32+16+8+4+2+(threadID/2)-1]; } __syncthreads(); if(threadID < 4 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+16+8+threadID] = shared_array[256+128+64+32+16+8+4+(threadID/2)]; else shared_array[256+128+64+32+16+8+threadID] += shared_array[256+128+64+32+16+8+4+(threadID/2)-1]; } __syncthreads(); if(threadID < 8 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+16+threadID] = shared_array[256+128+64+32+16+8+(threadID/2)]; else shared_array[256+128+64+32+16+threadID] += shared_array[256+128+64+32+16+8+(threadID/2)-1]; } __syncthreads(); if(threadID < 16 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+threadID] = shared_array[256+128+64+32+16+(threadID/2)]; else shared_array[256+128+64+32+threadID] += shared_array[256+128+64+32+16+(threadID/2)-1]; } __syncthreads(); if(threadID < 32 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+threadID] = shared_array[256+128+64+32+(threadID/2)]; else shared_array[256+128+64+threadID] += shared_array[256+128+64+32+(threadID/2)-1]; } __syncthreads(); if(threadID < 64 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+threadID] = shared_array[256+128+64+(threadID/2)]; else shared_array[256+128+threadID] += shared_array[256+128+64+(threadID/2)-1]; } __syncthreads(); if(threadID < 128 && threadID!=0) { if(threadID%2 != 0) shared_array[256+threadID] = shared_array[256+128+(threadID/2)]; else shared_array[256+threadID] += shared_array[256+128+(threadID/2)-1]; } __syncthreads(); if(threadID < 256 && threadID!=0) { if(threadID%2 != 0) shared_array[threadID] = shared_array[256+(threadID/2)]; else shared_array[threadID] += shared_array[256+(threadID/2)-1]; } __syncthreads(); //========================================================================== // // Now shared array[0..255] contains prefix sum // There is an array of size maxNumPotentialMatches for each cluster, we will write the potential matches there // The 1st entry would be the total number of potential matches and // then each thread will write the matching sift ids it found at write position // //========================================================================== potentialMatches[(2+maxNumPotentialMatches)*clusterID] = shared_array[255]; // total num of potential matches int writePosition = 1; if(threadID > 0) writePosition = shared_array[threadID-1] + 1; __syncthreads(); k = 0; for(i=0;i<=top;i++) { if(which[i] == 1) { if(topOfBucket[grids[i]]!=-1) { for(j = topOfBucket[grids[i]]; grids[i] == gridID[j]; j++) { potentialMatches[((2+maxNumPotentialMatches)*clusterID)+writePosition+k] = target_siftID[j]; k++; } } } else if(which[i] == 2) { if(topOfBucket2[grids[i]]!=-1) { for(j = topOfBucket2[grids[i]]; grids[i] == gridID2[j]; j++) { potentialMatches[((2+maxNumPotentialMatches)*clusterID)+writePosition+k] = target_siftID2[j]; k++; } } } else if(which[i] == 3) { if(topOfBucket3[grids[i]]!=-1) { for(j = topOfBucket3[grids[i]]; grids[i] == gridID3[j]; j++) { potentialMatches[((2+maxNumPotentialMatches)*clusterID)+writePosition+k] = target_siftID3[j]; k++; } } } else if(which[i] == 4) { if(topOfBucket4[grids[i]]!=-1) { for(j = topOfBucket4[grids[i]]; grids[i] == gridID4[j]; j++) { potentialMatches[((2+maxNumPotentialMatches)*clusterID)+writePosition+k] = 
target_siftID4[j]; k++; } } } } __syncthreads(); } __device__ void findmin1min2(int a, int b , int c, int d,int a_id,int b_id,int c_id,int d_id,int *min1,int *min2,int *min1_id,int *min2_id) { //======================================================================== // This chunk of code finds the minimum and next minimun and correnponding // minimum and next minimum ID between a,b,c,d //======================================================================== if( a < b && a < c && a < d ) { *min1 = a; *min1_id = a_id; if(b < c && b < d) { *min2 = b; *min2_id = b_id; } else if(c < d) { *min2 = c; *min2_id = c_id; } else { *min2 = d; *min2_id = d_id; } } else if(b < c && b < d) { *min1 = b; *min1_id = b_id; if(a < c && a < d) { *min2 = a; *min2_id = a_id; } else if(c < d) { *min2 = c; *min2_id = c_id; } else { *min2 = d; *min2_id = d_id; } } else if(c < d) { *min1 = c; *min1_id = c_id; if(a < b && a < d) { *min2 = a; *min2_id = a_id; } else if(b < d) { *min2 = b; *min2_id = b_id; } else { *min2 = d; *min2_id = d_id; } } else { *min1 = d; *min1_id = d_id; if(a < b && a < c) { *min2 = a; *min2_id = a_id; } else if(b < c) { *min2 = b; *min2_id = b_id; } else { *min2 = c; *min2_id = c_id; } } } __global__ void findMatches(int *matches,int *clusterID,int *potentialMatches,int maxNumPotentialMatches) { //======================================================================== // // For each source sift, find the cluster it belongs to // Then load the potential matching target sifts of that cluster and // compute the L2 distance between the descriptors. // Now find the min and next min of these distances. // Declare a match based on some tests. // //======================================================================== int source_siftID,threadID,cluster_id,num_potential_matches,num_pts_per_thread,target_siftID; int min_id,next_min_id,i,j; int min_num,next_min,dist; extern __shared__ int shared_array[]; source_siftID = blockIdx.x; threadID = threadIdx.x; cluster_id = clusterID[source_siftID]; num_potential_matches = potentialMatches[(2+maxNumPotentialMatches)*cluster_id]; // matches[source_siftID] = -1; if (num_potential_matches%blockDim.x == 0) num_pts_per_thread = num_potential_matches/blockDim.x; else num_pts_per_thread = 1 + (num_potential_matches/blockDim.x); // if(num_potential_matches > maxNumPotentialMatches) // printf("%d\n",num_potential_matches); min_num = 999999; next_min = 999999; min_id = -1; next_min_id = -1; int dummy; int a,b; for(i = 0;i < num_pts_per_thread; i++) { if((num_pts_per_thread*threadID)+i < num_potential_matches) { target_siftID = potentialMatches[((2+maxNumPotentialMatches)*cluster_id)+1+(num_pts_per_thread*threadID)+i]; dist = 0; for(j = 0 ; j < 128; j++) { a = tex1Dfetch(tex_targetDescriptor,(target_siftID*128)+j); b = tex1Dfetch(tex_sourceDescriptor,(source_siftID*128)+j); dummy = a-b; dist+=(dummy*dummy); } // for( j = 0 ; j < 128 ; j++) // { // dist+= (((int)tex1Dfetch(tex_sourceDescriptor,(source_siftID*128)+j) - ((int)tex1Dfetch(tex_targetDescriptor,(target_siftID*128)+j)))*((int)tex1Dfetch(tex_sourceDescriptor,(source_siftID*128)+j) - ((int)tex1Dfetch(tex_targetDescriptor,(target_siftID*128)+j)))); // } if(dist <= min_num) { next_min = min_num; next_min_id = min_id; min_num = dist; min_id = target_siftID; } else if(dist < next_min) { next_min = dist; next_min_id = target_siftID; } } } // Store the min and next min found by each thread into the shared memory shared_array[threadID] = min_num; shared_array[threadID+blockDim.x] = next_min; 
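//==========================================================================
// Shared-memory layout for the block-wide reduction below: four segments of
// blockDim.x ints -- [0, blockDim.x) best distance, [blockDim.x, 2*blockDim.x)
// second-best distance, and (stored next) [2*blockDim.x, 3*blockDim.x) best
// target siftID, [3*blockDim.x, 4*blockDim.x) second-best target siftID.
// This is why the kernel is launched with 4*threadsPerBlock*sizeof(int) of
// dynamic shared memory, and the hard-coded 128/64/.../1 reduction steps
// assume blockDim.x == 256, matching the launch configuration.
//==========================================================================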
shared_array[threadID+2*blockDim.x] = min_id; shared_array[threadID+3*blockDim.x] = next_min_id; __syncthreads(); // Shared array fully occupied // Time to compute the min and next min across the block ( min and next min in the shared memory ) int c,d; int a_id,b_id,c_id,d_id; int min1,min2; int min1_id,min2_id; if(threadID < 128) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+128]; d = shared_array[threadID+blockDim.x+128]; c_id = shared_array[threadID+2*blockDim.x+128]; d_id = shared_array[threadID+3*blockDim.x+128]; findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); shared_array[threadID] = min1; shared_array[threadID+blockDim.x] = min2; shared_array[threadID+2*blockDim.x] = min1_id; shared_array[threadID+3*blockDim.x] = min2_id; } __syncthreads(); if(threadID < 64) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+64]; d = shared_array[threadID+blockDim.x+64]; c_id = shared_array[threadID+2*blockDim.x+64]; d_id = shared_array[threadID+3*blockDim.x+64]; findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); shared_array[threadID] = min1; shared_array[threadID+blockDim.x] = min2; shared_array[threadID+2*blockDim.x] = min1_id; shared_array[threadID+3*blockDim.x] = min2_id; } __syncthreads(); if(threadID < 32) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+32]; d = shared_array[threadID+blockDim.x+32]; c_id = shared_array[threadID+2*blockDim.x+32]; d_id = shared_array[threadID+3*blockDim.x+32]; findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); shared_array[threadID] = min1; shared_array[threadID+blockDim.x] = min2; shared_array[threadID+2*blockDim.x] = min1_id; shared_array[threadID+3*blockDim.x] = min2_id; } __syncthreads(); if(threadID < 16) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+16]; d = shared_array[threadID+blockDim.x+16]; c_id = shared_array[threadID+2*blockDim.x+16]; d_id = shared_array[threadID+3*blockDim.x+16]; findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); shared_array[threadID] = min1; shared_array[threadID+blockDim.x] = min2; shared_array[threadID+2*blockDim.x] = min1_id; shared_array[threadID+3*blockDim.x] = min2_id; } __syncthreads(); if(threadID < 8) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+8]; d = shared_array[threadID+blockDim.x+8]; c_id = shared_array[threadID+2*blockDim.x+8]; d_id = shared_array[threadID+3*blockDim.x+8]; findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); shared_array[threadID] = min1; shared_array[threadID+blockDim.x] = min2; shared_array[threadID+2*blockDim.x] = min1_id; shared_array[threadID+3*blockDim.x] = min2_id; } __syncthreads(); if(threadID < 4) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+4]; d = 
shared_array[threadID+blockDim.x+4]; c_id = shared_array[threadID+2*blockDim.x+4]; d_id = shared_array[threadID+3*blockDim.x+4]; findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); shared_array[threadID] = min1; shared_array[threadID+blockDim.x] = min2; shared_array[threadID+2*blockDim.x] = min1_id; shared_array[threadID+3*blockDim.x] = min2_id; } __syncthreads(); if(threadID < 2) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+2]; d = shared_array[threadID+blockDim.x+2]; c_id = shared_array[threadID+2*blockDim.x+2]; d_id = shared_array[threadID+3*blockDim.x+2]; findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); shared_array[threadID] = min1; shared_array[threadID+blockDim.x] = min2; shared_array[threadID+2*blockDim.x] = min1_id; shared_array[threadID+3*blockDim.x] = min2_id; } __syncthreads(); if(threadID == 0) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+1]; d = shared_array[threadID+blockDim.x+1]; c_id = shared_array[threadID+2*blockDim.x+1]; d_id = shared_array[threadID+3*blockDim.x+1]; // Compute min and next min findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); // Ratio test if((float)min1/min2 < 0.36 && min1_id > 0 && num_potential_matches > 9 && min2_id != 999999) // we store the squared distances so the ratio becomes 0.36 instead of 0.6 { // Declare match matches[source_siftID] = min1_id; } } __syncthreads(); } int max(int a,int b,int c,int d) { int maxim = -9999; if(maxim < a) maxim = a; if(maxim < b) maxim = b; if(maxim < c) maxim = c; if(maxim < d) maxim = d; return maxim; } double Matcher::matchImagePair(sift_img source,sift_img target,float *f_matrix,SiftGPU::SiftKeypoint* &d_keys1,unsigned char* &descriptors1 ,SiftGPU::SiftKeypoint* &d_keys2,unsigned char* &descriptors2 ) { struct timespec t1, t2, t3, t4, t5; clock_gettime(CLOCK_MONOTONIC, &t3); clock_gettime(CLOCK_MONOTONIC, &t1); //================== GPU initialization ======================= hipSetDevice(0); hipFree(0); //============================================================= clock_gettime(CLOCK_MONOTONIC, &t2); double time_gpu_init = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================= //============================================================= clock_gettime(CLOCK_MONOTONIC, &t1); //================== Declare variables ======================== int i,j; int *h_cluster, *h_gridID; long long int *h_epipolarPoints; // Stores ends of the epipolar line segment packed in a single long long int h_epipolarPoints = (long long int*) malloc(sizeof(long long int)*source.num_keys); h_cluster = (int *) malloc(sizeof(int)*source.num_keys); h_gridID = (int *) malloc(sizeof(int)*target.num_keys); //to store gridIDs on host clock_gettime(CLOCK_MONOTONIC, &t2); double time_total_cpu_alloc = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); clock_gettime(CLOCK_MONOTONIC, &t1); //==================== Declare device arrays =================== clock_gettime(CLOCK_MONOTONIC, &t5); // float *d_target_x, *d_target_y, *d_source_x, *d_source_y; // SiftGPU::SiftKeypoint *d_keys1, *d_keys2; float *d_fmatrix; // float *desc1,*desc2; int *d_gridID , *d_target_siftID , *d_gridID1 , 
*d_target_siftID1, *d_gridID2 , *d_target_siftID2,*d_gridID3 , *d_target_siftID3,*d_gridID4 , *d_target_siftID4, *d_source_siftID, *d_cluster, *d_clusterID; //// unsigned char *d_source_keypoints,*d_target_keypoints; long long int *d_epipolarPoints; //============================================================== //=================== Allocate memory on device ================ // hipMalloc((void**)&d_keys1, sizeof(SiftGPU::SiftKeypoint)*source.num_keys); // hipMalloc((void**)&d_keys2, sizeof(SiftGPU::SiftKeypoint)*target.num_keys); // hipMalloc((void**)&desc1, 128*sizeof(float)*source.num_keys); // hipMalloc((void**)&desc2, 128*sizeof(float)*target.num_keys); // hipMalloc((void**)&d_source_x, sizeof(float)*source.num_keys); // hipMalloc((void**)&d_source_y, sizeof(float)*source.num_keys); // hipMalloc((void**)&d_target_x, sizeof(float)*target.num_keys); // hipMalloc((void**)&d_target_y, sizeof(float)*target.num_keys); //// hipMalloc((void**)&d_source_keypoints, sizeof(unsigned char)*128*source.num_keys); //// hipMalloc((void**)&d_target_keypoints, sizeof(unsigned char)*128*target.num_keys); hipMalloc((void**)&d_source_siftID, sizeof(int)*source.num_keys); hipMalloc((void**)&d_target_siftID, sizeof(int)*target.num_keys); hipMalloc((void**)&d_target_siftID1, sizeof(int)*target.num_keys); hipMalloc((void**)&d_target_siftID2, sizeof(int)*target.num_keys); hipMalloc((void**)&d_target_siftID3, sizeof(int)*target.num_keys); hipMalloc((void**)&d_target_siftID4, sizeof(int)*target.num_keys); hipMalloc((void**)&d_fmatrix, sizeof(float)*9); hipMalloc((void**)&d_gridID, sizeof(int)*target.num_keys); hipMalloc((void**)&d_gridID1, sizeof(int)*target.num_keys); hipMalloc((void**)&d_gridID2, sizeof(int)*target.num_keys); hipMalloc((void**)&d_gridID3, sizeof(int)*target.num_keys); hipMalloc((void**)&d_gridID4, sizeof(int)*target.num_keys); hipMalloc((void**)&d_epipolarPoints, sizeof(long long int)*source.num_keys); hipMalloc((void**)&d_cluster, sizeof(int)*source.num_keys); hipMalloc((void**)&d_clusterID, sizeof(int)*source.num_keys); //============================================================== clock_gettime(CLOCK_MONOTONIC, &t2); double time_malloc1 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //================ Copy arrays from host to device ============= // hipMemcpy(desc1, descriptors1.data(), sizeof(float)*source.num_keys*128, hipMemcpyHostToDevice); // hipMemcpy(desc2, descriptors2.data(), sizeof(float)*target.num_keys*128, hipMemcpyHostToDevice); clock_gettime(CLOCK_MONOTONIC, &t1); // hipMemcpy(d_keys1, keys1.data(), sizeof(SiftGPU::SiftKeypoint)*source.num_keys, hipMemcpyHostToDevice); // hipMemcpy(d_keys2, keys2.data(), sizeof(SiftGPU::SiftKeypoint)*target.num_keys, hipMemcpyHostToDevice); hipMemcpy(d_fmatrix, f_matrix, sizeof(float)*9, hipMemcpyHostToDevice); // hipMemcpy(d_target_keypoints, target.keypoints, sizeof(unsigned char)*target.num_keys*128, hipMemcpyHostToDevice); // hipMemcpy(d_source_keypoints, source.keypoints, sizeof(unsigned char)*source.num_keys*128, hipMemcpyHostToDevice); //============================================================== int threadsPerBlock; int numBlocks; thrust::device_ptr<int> d_target_sift_ptr(d_target_siftID); thrust::device_ptr<int> d_source_sift_ptr(d_source_siftID); thrust::sequence(d_target_sift_ptr,d_target_sift_ptr+target.num_keys); thrust::sequence(d_source_sift_ptr,d_source_sift_ptr+source.num_keys); // initialize a device_vector with the list //// thrust::host_vector<unsigned char> 
h_d1(descriptors1.begin(), descriptors1.end()); //// thrust::host_vector<unsigned char> h_d2(descriptors2.begin(), descriptors2.end()); //// thrust::device_vector<unsigned char> d1 = h_d1; //// thrust::device_vector<unsigned char> d2 = h_d2; // thrust::device_ptr<unsigned char> d_desc1_ptr(desc1); // thrust::device_ptr<unsigned char> d_desc2_ptr(desc2); // thrust::device_vector<unsigned char> u_d1(128*source.num_keys); // thrust::device_vector<unsigned char> u_d2(128*target.num_keys); // thrust::transform(d1.begin(), d1.end(), u_d1.begin(), cast()); // thrust::transform(d2.begin(), d2.end(), u_d2.begin(), cast()); // thrust::transform(d_desc1_ptr,d_desc1_ptr + 128*source.num_keys, u_d1.begin(), cast()); // thrust::transform(d_desc2_ptr,d_desc2_ptr + 128*target.num_keys, u_d2.begin(), cast()); //// unsigned char * desc1_ptr = thrust::raw_pointer_cast(d1.data()); //// unsigned char * desc2_ptr = thrust::raw_pointer_cast(d2.data()); // unsigned char *h = (unsigned char *)malloc(128*target.num_keys*sizeof(unsigned char)); // unsigned char * dv_ptr = thrust::raw_pointer_cast(u_d2.data()); // hipMemcpy(h,dv_ptr,128*sizeof(unsigned char)*target.num_keys, hipMemcpyDeviceToHost); // for(int i = 0; i < 128*target.num_keys; i++) // { // printf("%hhu\n",h[i]); // } hipMemcpy(d_target_siftID1, d_target_siftID, sizeof(int)*target.num_keys, hipMemcpyDeviceToDevice); hipMemcpy(d_target_siftID2, d_target_siftID, sizeof(int)*target.num_keys, hipMemcpyDeviceToDevice); hipMemcpy(d_target_siftID3, d_target_siftID, sizeof(int)*target.num_keys, hipMemcpyDeviceToDevice); hipMemcpy(d_target_siftID4, d_target_siftID, sizeof(int)*target.num_keys, hipMemcpyDeviceToDevice); //====================== Bind textures ========================= hipBindTexture (0, tex_targetDescriptor, descriptors2, sizeof(unsigned char)*target.num_keys*128 ); hipBindTexture (0, tex_sourceDescriptor, descriptors1, sizeof(unsigned char)*source.num_keys*128 ); // hipBindTexture (0, tex_targetDescriptor, d_target_keypoints, sizeof(unsigned char)*target.num_keys*128 ); // hipBindTexture (0, tex_sourceDescriptor, d_source_keypoints, sizeof(unsigned char)*source.num_keys*128 ); clock_gettime(CLOCK_MONOTONIC, &t2); double time_memcpy1 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //printf("%d %d\n",source.num_keys,target.num_keys); //====================== INDEXING STEP ======================== // The indexing step divides the target image into grids. // The features get indexed based on the grid to which they belong. 
//============================================================== Grid target_grid; target_grid.g_init(target); //================ Decide kernel configuration ================= threadsPerBlock = 512; numBlocks = ceil(((float)target.num_keys)/threadsPerBlock); //===================== Call the GPU Kernel ==================== // This kernel computes gridID for each sift of the target image clock_gettime(CLOCK_MONOTONIC, &t1); hipLaunchKernelGGL(( computeGridID), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_keys2, d_gridID1, d_gridID2, d_gridID3, d_gridID4, target.num_keys,target_grid.gridSize, target_grid.numXGrids1,target_grid.halfSize,target_grid.numXGrids2,target_grid.numGrids,target_grid.numGridsXOv,target_grid.numGridsYOv); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_computeGridID = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== // Current situation // +-------+---+---+---+---+---+---+---+---- // |SiftID | 0 | 1 | 2 | 3 | 4 | 5 | 6 | ... // +-------+---+---+---+---+---+---+---+---- // |GridID | a | b | c | d | e | f | g | ... // +-------+---+---+---+---+---+---+---+---- // Desired situation // +-------+---+---+---+---+---+---+---+---- // |GridID | 0 | 0 | 2 | 5 | 5 | 5 | 6 | ... // +-------+---+---+---+---+---+---+---+---- // |SiftID | a | b | c | d | e | f | g | ... // +-------+---+---+---+---+---+---+---+---- // Sort to bring sifts with same gridID together //========= Sort : keys = gridID , values = siftID =========== clock_gettime(CLOCK_MONOTONIC, &t1); thrust::device_ptr<int> d_gridID_ptr1(d_gridID1); thrust::device_ptr<int> d_target_siftID_ptr1(d_target_siftID1); thrust::device_ptr<int> d_gridID_ptr2(d_gridID2); thrust::device_ptr<int> d_target_siftID_ptr2(d_target_siftID2); thrust::device_ptr<int> d_gridID_ptr3(d_gridID3); thrust::device_ptr<int> d_target_siftID_ptr3(d_target_siftID3); thrust::device_ptr<int> d_gridID_ptr4(d_gridID4); thrust::device_ptr<int> d_target_siftID_ptr4(d_target_siftID4); thrust::sort_by_key(d_gridID_ptr1, d_gridID_ptr1+target.num_keys,d_target_siftID_ptr1); thrust::sort_by_key(d_gridID_ptr2, d_gridID_ptr2+target.num_keys,d_target_siftID_ptr2); thrust::sort_by_key(d_gridID_ptr3, d_gridID_ptr3+target.num_keys,d_target_siftID_ptr3); thrust::sort_by_key(d_gridID_ptr4, d_gridID_ptr4+target.num_keys,d_target_siftID_ptr4); clock_gettime(CLOCK_MONOTONIC, &t2); double time_thrust1 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); // Current situation // +-------+---+---+---+---+---+---+---+---- // |GridID | 0 | 0 | 2 | 5 | 5 | 5 | 6 | ... // +-------+---+---+---+---+---+---+---+---- // |SiftID | a | b | c | d | e | f | g | ... // +-------+---+---+---+---+---+---+---+---- // Now to access all sifts corresponding to gridID = x we record topOfBucket // +---------------+---+---+---+---+---+---+---+--- // |GridID | 0 | 1 | 2 | 3 | 4 | 5 | 6 | ... // +---------------+---+---+---+---+---+---+---+--- // |TopOfBucket | 0 |-1 | 2 |-1 |-1 | 3 | 6 | ... 
// +---------------+---+---+---+---+---+---+---+--- // filhal hipMemcpy(d_gridID, d_gridID1, sizeof(int)*target.num_keys, hipMemcpyDeviceToDevice); hipMemcpy(d_target_siftID, d_target_siftID1, sizeof(int)*target.num_keys, hipMemcpyDeviceToDevice); //================== Record topOfBucket ===================== clock_gettime(CLOCK_MONOTONIC, &t1); thrust::device_vector<int> d_topOfBucket(target_grid.numGrids, -1); thrust::device_vector<int> d_numSift(target_grid.numGrids, 0); thrust::device_vector<int> d_topOfBucket2(target_grid.numGridsXOv, -1); thrust::device_vector<int> d_numSift2(target_grid.numGridsXOv, 0); thrust::device_vector<int> d_topOfBucket3(target_grid.numGridsYOv, -1); thrust::device_vector<int> d_numSift3(target_grid.numGridsYOv, 0); thrust::device_vector<int> d_topOfBucket4(target_grid.numGridsXYOv, -1); thrust::device_vector<int> d_numSift4(target_grid.numGridsXYOv, 0); int *d_topOfBucket_ptr = thrust::raw_pointer_cast(d_topOfBucket.data()); int *d_numSift_ptr = thrust::raw_pointer_cast(d_numSift.data()); int *d_topOfBucket2_ptr = thrust::raw_pointer_cast(d_topOfBucket2.data()); int *d_numSift2_ptr = thrust::raw_pointer_cast(d_numSift2.data()); int *d_topOfBucket3_ptr = thrust::raw_pointer_cast(d_topOfBucket3.data()); int *d_numSift3_ptr = thrust::raw_pointer_cast(d_numSift3.data()); int *d_topOfBucket4_ptr = thrust::raw_pointer_cast(d_topOfBucket4.data()); int *d_numSift4_ptr = thrust::raw_pointer_cast(d_numSift4.data()); clock_gettime(CLOCK_MONOTONIC, &t2); double time_thrust2 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //================== Call the GPU kernel ==================== clock_gettime(CLOCK_MONOTONIC, &t1); hipLaunchKernelGGL(( findTopOfBucket), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_gridID,d_gridID2,d_gridID3,d_gridID4, target.num_keys,d_topOfBucket_ptr,d_topOfBucket2_ptr,d_topOfBucket3_ptr,d_topOfBucket4_ptr,target_grid.numGrids,target_grid.numGridsXOv,target_grid.numGridsYOv,target_grid.numGridsXYOv); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_topOfBucket = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //========== Record the number of sifts in each grid =========== threadsPerBlock = 512; numBlocks = ceil((float)(max(target_grid.numGrids,target_grid.numGridsXOv,target_grid.numGridsYOv,target_grid.numGridsXYOv))/threadsPerBlock); clock_gettime(CLOCK_MONOTONIC, &t1); hipLaunchKernelGGL(( findNumSift), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_gridID,d_topOfBucket_ptr,d_gridID2,d_topOfBucket2_ptr,d_gridID3,d_topOfBucket3_ptr,d_gridID4,d_topOfBucket4_ptr,target_grid.numGrids,target_grid.numGridsXOv,target_grid.numGridsYOv,target_grid.numGridsXYOv,d_numSift_ptr,d_numSift2_ptr,d_numSift3_ptr,d_numSift4_ptr); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_numSift= ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //==================== CLUSTERING STEP ======================== // This step deals with finding the epipolar lines for each sift // of the source image and clustering sifts with similar epipolar // lines together. 
//============================================================== //=================== Find epipolar lines ====================== // The kernel findEpipolarLine finds the epipolar line and stores // stores its points of intersection with the boundaries of the // target image in d_epipolarPoints. //============================================================== //=============== Decide kernel configuration ================== threadsPerBlock = 512; numBlocks = ceil(((float)source.num_keys)/threadsPerBlock); clock_gettime(CLOCK_MONOTONIC, &t1); hipLaunchKernelGGL(( findEpipolarLine), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_fmatrix,d_keys1,source.width,source.height, target.width, target.height,d_epipolarPoints,source.num_keys); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_findEpiline = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //==== Sort the epipolar lines (keys) with siftIDs as values ==== clock_gettime(CLOCK_MONOTONIC, &t1); thrust::device_ptr<long long int> d_epipolarPoints_ptr(d_epipolarPoints); thrust::device_ptr<int> d_source_siftID_ptr(d_source_siftID); thrust::sort_by_key(d_epipolarPoints_ptr, d_epipolarPoints_ptr+source.num_keys,d_source_siftID_ptr); clock_gettime(CLOCK_MONOTONIC, &t2); double time_thrust4 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== // Current scene : // sifts with similar epipolar lines are together now // +----------------------+---+---+---+---+---+---+---+---- // |Epipolar Line end pts | a | b | c | d | e | f | g | ... (sorted) // +----------------------+---+---+---+---+---+---+---+---- // |SiftID | m | n | o | p | q | r | s | ... (random order) // +----------------------+---+---+---+---+---+---+---+---- // An array called cluster(h_cluster and d_cluster) will set the boundaries // If epl[i] - epl[i-1] < threshold then cluster[i] = 0 else cluster[i] = 1 //============================================================== h_cluster[0] = 0; clock_gettime(CLOCK_MONOTONIC, &t1); hipMemcpy(d_cluster, h_cluster, sizeof(int)*source.num_keys, hipMemcpyHostToDevice); clock_gettime(CLOCK_MONOTONIC, &t2); double time_memcpy3 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //=== Cluster the epipolarPoints together (similar epipolar lines in one group) === clock_gettime(CLOCK_MONOTONIC, &t1); hipLaunchKernelGGL(( clusterLines), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_epipolarPoints,d_cluster,source.num_keys); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_clusterLines = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); // Scene after clustering : // sifts with similar epipolar lines are together now // +----------------------+---+---+---+---+---+---+---+---- // |Epipolar Line end pts | a | b | c | d | e | f | g | ... (sorted) // +----------------------+---+---+---+---+---+---+---+---- // |SiftID | m | n | o | p | q | r | s | ... (random order) // +----------------------+---+---+---+---+---+---+---+---- // |cluster | 0 | 0 | 1 | 0 | 1 | 1 | 1 | ... 
// +----------------------+---+---+---+---+---+---+---+---- // Now compute prefix sum to assign cluster ids //============================================================== clock_gettime(CLOCK_MONOTONIC, &t1); thrust::device_ptr<int> d_cluster_ptr(d_cluster); thrust::inclusive_scan(d_cluster_ptr, d_cluster_ptr + source.num_keys, d_cluster_ptr); clock_gettime(CLOCK_MONOTONIC, &t2); double time_thrust5 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //================= current scene example ====================== // // +---------------+---+---+---+---+---+---+---+---- // |id | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 ... // +---------------+---+---+---+---+---+---+---+---- // |siftID | a | b | c | d | e | f | g | h ... // +---------------+---+---+---+---+---+---+---+---- // |h_cluster | 0 | 1 | 1 | 2 | 3 | 3 | 3 | 4 ... // +---------------+---+---+---+---+---+---+---+---- // // so we want cluster location to be like this // // +-----------------+---+---+---+---+---+---+---+---- // |cluster_id | 0 | 1 | 2 | 3 | 4 | ... // +-----------------+---+---+---+---+---+---+---+---- // |cluster_location | 0 | 1 | 3 | 4 | 7 | ... // +-----------------+---+---+---+---+---+---+---+---- // //============================================================== clock_gettime(CLOCK_MONOTONIC, &t1); hipMemcpy(h_cluster, d_cluster, sizeof(int)*source.num_keys, hipMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC, &t2); double time_memcpy4 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== // ================= Record Number of Clusters ================ int numClusters = h_cluster[source.num_keys-1]+1; //============================================================== clock_gettime(CLOCK_MONOTONIC, &t1); thrust::device_vector<int> d_clusterLocation(numClusters, -1); d_clusterLocation[h_cluster[0]] = 0; int * d_clusterLocation_ptr = thrust::raw_pointer_cast(d_clusterLocation.data()); clock_gettime(CLOCK_MONOTONIC, &t2); double time_thrust6 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //==================== Find Cluster Location =================== clock_gettime(CLOCK_MONOTONIC, &t1); hipLaunchKernelGGL(( findClusterLocation), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_cluster, source.num_keys,d_clusterLocation_ptr,d_source_siftID,d_clusterID); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_findClusterLocation = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //===================== Matching Step ========================== // // Matching involves the following two kernels : // ----------------------------------------------------------- // // -> FindPotentialMatches which finds eqidistant points on the // epipolar line and the grids those points fall into. Then it // stores the target sifts which are a part of those grids. // These sifts are our potential matches. This is done for each // cluster. // // ----------------------------------------------------------- // // -> FindMatches which, for each source sift, loads the potential // matches and computes the euclidian distances. It then takes // the ratio of minimum and next minimum distance and declares // a match based on some criterias. 
// //============================================================== // [date : 1 oct 2015] first we do a find potential matches to just find the num of potential matches for each cluster // then we find the max of these num_potential_matches // and then we malloc the d_pot_patches array with that size //================= Decide kernel configuration ================ threadsPerBlock = 256 ; numBlocks = numClusters; //============================================================== thrust::device_vector<int> d_numPotentialMatches(numClusters, 0); int * d_numPotentialMatches_ptr = thrust::raw_pointer_cast(d_numPotentialMatches.data()); clock_gettime(CLOCK_MONOTONIC, &t1); hipLaunchKernelGGL(( findNumPotentialMatchesForEachCluster), dim3(numBlocks), dim3(threadsPerBlock), 2*threadsPerBlock*sizeof(int), 0, d_gridID,d_target_siftID,d_topOfBucket_ptr,d_gridID2,d_target_siftID2,d_topOfBucket2_ptr,d_gridID3,d_target_siftID3,d_topOfBucket3_ptr,d_gridID4,d_target_siftID4,d_topOfBucket4_ptr,target_grid.gridSize,d_clusterLocation_ptr,d_epipolarPoints,d_numSift_ptr,d_numSift2_ptr,d_numSift3_ptr,d_numSift4_ptr,target.height, target_grid.numXGrids1,target_grid.halfSize,target_grid.numXGrids2,target_grid.numGridsXOv,target_grid.numGridsYOv,target_grid.numGrids,target_grid.numGridsXYOv,d_numPotentialMatches_ptr); hipDeviceSynchronize(); // for(int i = 0 ; i < d_numPotentialMatches.size(); i++) // std::cout << d_numPotentialMatches[i] << std::endl; thrust::device_vector<int>::iterator d_max = thrust::max_element(d_numPotentialMatches.begin(),d_numPotentialMatches.end()); // int *result = thrust::min_element(d_numPotentialMatches.begin(), d_numPotentialMatches.end()); int maxNumPotentialMatches = *d_max; printf("maxpot = %d\n",maxNumPotentialMatches); clock_gettime(CLOCK_MONOTONIC, &t2); double time_computeNumPotentialMatches = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //================ Malloc d_potentialMatches =================== clock_gettime(CLOCK_MONOTONIC, &t1); int *d_potentialMatches; hipMalloc((void **)&d_potentialMatches,numClusters*(maxNumPotentialMatches+2)*sizeof(int)); // allocate memory on device clock_gettime(CLOCK_MONOTONIC, &t2); double time_malloc4 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //=========== Find Potential Matches for each cluster ========== clock_gettime(CLOCK_MONOTONIC, &t1); hipLaunchKernelGGL(( findPotentialMatchesForEachCluster), dim3(numBlocks), dim3(threadsPerBlock), 2*threadsPerBlock*sizeof(int), 0, d_gridID,d_target_siftID,d_topOfBucket_ptr,d_gridID2,d_target_siftID2,d_topOfBucket2_ptr,d_gridID3,d_target_siftID3,d_topOfBucket3_ptr,d_gridID4,d_target_siftID4,d_topOfBucket4_ptr,target_grid.gridSize,d_clusterLocation_ptr,d_epipolarPoints,d_numSift_ptr,d_numSift2_ptr,d_numSift3_ptr,d_numSift4_ptr,d_potentialMatches,target.height, target_grid.numXGrids1,target_grid.halfSize,target_grid.numXGrids2,target_grid.numGridsXOv,target_grid.numGridsYOv,target_grid.numGrids,target_grid.numGridsXYOv,maxNumPotentialMatches); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_computePotentialMatches = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== // Declare matches array which stores the matching siftID // ( or -1 if no match found) for each query sift clock_gettime(CLOCK_MONOTONIC, &t1); int *match = (int 
*)malloc(sizeof(int)*source.num_keys); int *d_matches_ptr; hipMalloc((void**)&d_matches_ptr, sizeof(int)*source.num_keys); for(i = 0 ; i < source.num_keys; i++) { match[i] = -1; } hipMemcpy(d_matches_ptr, match, sizeof(int)*source.num_keys, hipMemcpyHostToDevice); int *h_potentialMatches = (int *)malloc(numClusters*(2+maxNumPotentialMatches)*sizeof(int)); // allocate memory on device hipMemcpy(h_potentialMatches , d_potentialMatches,numClusters*(maxNumPotentialMatches+2)*sizeof(int) , hipMemcpyDeviceToHost); /* for(int i = 0 ; i < numClusters; i++) { if(h_potentialMatches[maxNumPotentialMatches*i] != 0) printf("numpotm = %d\n",h_potentialMatches[maxNumPotentialMatches*i]); } */ // thrust::device_vector<int> d_matches(source.num_keys, -1); // int * d_matches_ptr = thrust::raw_pointer_cast(d_matches.data()); clock_gettime(CLOCK_MONOTONIC, &t2); double time_thrust3 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //================= Decide kernel configuration ================ threadsPerBlock = 256 ; numBlocks = source.num_keys; //============================================================== //======================== Find Matches ======================== clock_gettime(CLOCK_MONOTONIC, &t1); hipLaunchKernelGGL(( findMatches), dim3(numBlocks), dim3(threadsPerBlock),4*threadsPerBlock*sizeof(int), 0, d_matches_ptr,d_clusterID,d_potentialMatches,maxNumPotentialMatches); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_computeMatches = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== hipMemcpy(match, d_matches_ptr, sizeof(int)*source.num_keys, hipMemcpyDeviceToHost); //====================== memcpy ============================== clock_gettime(CLOCK_MONOTONIC, &t1); // thrust::host_vector<int> h_matches = d_matches; clock_gettime(CLOCK_MONOTONIC, &t2); double time_memcpy5 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //======================== CLEAN UP ============================ //===================== bind the textures ==================== clock_gettime(CLOCK_MONOTONIC, &t1); hipUnbindTexture (tex_targetDescriptor); hipUnbindTexture (tex_sourceDescriptor); clock_gettime(CLOCK_MONOTONIC, &t2); double time_unbindtex = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //===================== Free device memory ===================== clock_gettime(CLOCK_MONOTONIC, &t1); // hipFree(d_keys1); // hipFree(d_keys2); // hipFree(d_source_x); // hipFree(d_source_y); // hipFree(d_target_x); // hipFree(d_target_y); //// hipFree(d_source_keypoints); //// hipFree(d_target_keypoints); hipFree(d_source_siftID); hipFree(d_target_siftID); hipFree(d_target_siftID1); hipFree(d_target_siftID2); hipFree(d_target_siftID3); hipFree(d_target_siftID4); hipFree(d_fmatrix); hipFree(d_gridID); hipFree(d_gridID1); hipFree(d_gridID2); hipFree(d_gridID3); hipFree(d_gridID4); hipFree(d_epipolarPoints); hipFree(d_cluster); hipFree(d_clusterID); hipFree(d_potentialMatches); hipFree(d_matches_ptr); //======================== Free host memory ==================== /* free(h_target_siftID); free(h_gridID); free(target_x); free(target_y); free(target_scale); free(target_orient); free(target_keypoints); free(h_source_siftID); free(source_x); free(source_y); free(source_scale); 
free(source_orient); free(source_keypoints); */ free(h_cluster); free(h_epipolarPoints); free(h_gridID); free(h_potentialMatches); clock_gettime(CLOCK_MONOTONIC, &t2); double time_freemem = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); clock_gettime(CLOCK_MONOTONIC, &t4); double time_total = ((t4.tv_sec - t3.tv_sec)*1000) + (((double)(t4.tv_nsec - t3.tv_nsec))/1000000.0); double time_gpu = ((t4.tv_sec - t5.tv_sec)*1000) + (((double)(t4.tv_nsec - t5.tv_nsec))/1000000.0); //============================================================== int num_matches = 0; for(i = 0 ; i < source.num_keys; i++) { if(match[i]!=-1) { num_matches++; matches.push_back(std::make_pair(i,match[i])); } } free(match); //============================================================== /* std::cout << "Time on GPU init: " << time_gpu_init << std::endl; std::cout << "Time on CPU setup: " << time_total_cpu_alloc << std::endl; std::cout << "Time - memcpy : " << time_memcpy1+time_memcpy3+time_memcpy4+time_memcpy5 << std::endl; printf("time_memcpy1 %lf, time_memcpy3 %lf , time_memcpy4 %lf, time_memcpy5 %lf\n",time_memcpy1,time_memcpy3,time_memcpy4,time_memcpy5); std::cout << "Time - malloc : " << time_malloc1+time_malloc4 << std::endl; std::cout << "Time to compute gridID : " << time_computeGridID << std::endl; std::cout << "Time - thrust : " << time_thrust1+time_thrust2+time_thrust3+time_thrust4+time_thrust5+time_thrust6 << std::endl; std::cout << "Time to compute topOfBucket : " << time_topOfBucket << std::endl; std::cout << "Time to find num sifts per grid : " << time_numSift << std::endl; std::cout << "Time to find epipolar lines : " << time_findEpiline << std::endl; std::cout << "Time to cluster lines : " << time_clusterLines << std::endl; std::cout << "Time to find cluster location : " << time_findClusterLocation << std::endl; std::cout << "Time to compute num potential matches per cluster: " << time_computeNumPotentialMatches << std::endl; std::cout << "Time to compute potential matches per cluster: " << time_computePotentialMatches << std::endl; std::cout << "Time to compute matches: " << time_computeMatches << std::endl; std::cout << "Time to unbind textures : " << time_unbindtex << std::endl; std::cout << "Time to free memory : " << time_freemem << std::endl; std::cout << "total time on gpu: " << time_gpu << std::endl; std::cout << "total time : " << time_total << std::endl; */ std::cout << "Total matches found : " << num_matches << std::endl; std::cout << time_gpu << std::endl; return time_gpu ; //============================================================== }
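//==============================================================================
// Illustrative sketch (not part of the original sources, guarded out with
// #if 0 so it does not interfere with this translation unit): the kernels
// above pack the two clipped endpoints (x1,y1)-(x2,y2) of an epipolar segment
// into one 64-bit key -- four 16-bit fields -- so that thrust::sort_by_key can
// group similar epipolar lines in a single sort. The minimal host-side
// pack/unpack below mirrors the bit operations used in findEpipolarLine and
// clusterLines; the function names and main() are hypothetical. The casts to
// unsigned short only guard against sign extension -- in the kernels the
// coordinates are already clipped to the image rectangle, so they are
// non-negative. Compile it as a separate file to run it.
//==============================================================================
#if 0
#include <cassert>
#include <cstdio>

static long long packEndpoints(short x1, short y1, short x2, short y2)
{
    long long l = 0;
    l |= (long long)(unsigned short)x1 << 48;   // bits 63..48 : x1
    l |= (long long)(unsigned short)y1 << 32;   // bits 47..32 : y1
    l |= (long long)(unsigned short)x2 << 16;   // bits 31..16 : x2
    l |= (long long)(unsigned short)y2;         // bits 15..0  : y2
    return l;
}

static void unpackEndpoints(long long l, short *x1, short *y1, short *x2, short *y2)
{
    const unsigned long long mask = 0xFFFFull;  // same mask the kernels build via (~0ULL) >> 48
    *x1 = (short)(mask & ((unsigned long long)l >> 48));
    *y1 = (short)(mask & ((unsigned long long)l >> 32));
    *x2 = (short)(mask & ((unsigned long long)l >> 16));
    *y2 = (short)(mask &  (unsigned long long)l);
}

int main()
{
    short x1, y1, x2, y2;
    unpackEndpoints(packEndpoints(10, 20, 630, 470), &x1, &y1, &x2, &y2);
    assert(x1 == 10 && y1 == 20 && x2 == 630 && y2 == 470);
    printf("segment: (%d,%d) -> (%d,%d)\n", x1, y1, x2, y2);
    return 0;
}
#endif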
20ced1aa2e500e9bc08433ceeea379dd4bb0477c.cu
#include "defs.h" #include "sifts.h" #include "Gridder.h" #include "Matcher.h" #include <thrust/extrema.h> //============================== DESCRIPTION ===================================== // // This code deals with matching sifts between two images // This code implements the algorithm for GPU Matching. It is assumed that // F-matrix is computed from the coarse k% features and remaining features are // matched using this algorithm. We will match SIFT // features in the query/source image to those in the target image. // //================================================================================ //============================ Declare textures ================================== texture <unsigned char, 1, cudaReadModeElementType> tex_sourceDescriptor; texture <unsigned char, 1, cudaReadModeElementType> tex_targetDescriptor; void Matcher::visualizeMatches(sift_img source,sift_img target,std::vector<std::pair<int,int> > matches,std::vector<SiftGPU::SiftKeypoint> &keys1,std::vector<SiftGPU::SiftKeypoint> &keys2) { const char* saveAs = NULL; cv::Mat queryImage = source.img; int qWidth = source.width; int qHeight = source.height; cv::Mat referenceImage = target.img; int rWidth = target.width; int rHeight = target.height; int canvasWidth = qWidth + rWidth; int canvasHeight = qHeight > rHeight ? qHeight : rHeight; cv::Mat canvas(canvasHeight, canvasWidth, CV_8UC3, cv::Scalar(0,0,0)); cv::Rect roi1 = cv::Rect(0,0,qWidth,qHeight); cv::Mat canvas_roi1 = canvas(roi1); queryImage.copyTo(canvas_roi1); cv::Rect roi2 = cv::Rect(qWidth,0,rWidth,rHeight); cv::Mat canvas_roi2 = canvas(roi2); referenceImage.copyTo(canvas_roi2); for(int i=0; i < matches.size(); i++) { std::pair<int,int> p = matches[i]; cv::Point pt1 = cv::Point(keys1[p.first].x, keys1[p.first].y); cv::Point pt2 = cv::Point(keys2[p.second].x + qWidth, keys2[p.second].y); cv::circle(canvas, pt1, 2, cv::Scalar(0,255,0), 4); cv::circle(canvas, pt2, 2, cv::Scalar(0,255,0), 4); cv::line(canvas, pt1, pt2, cv::Scalar(0,255,0), 4); } cv::namedWindow("FeatureMatches",cv::WINDOW_NORMAL); imshow("FeatureMatches", canvas); cv::waitKey(); if(saveAs != NULL) { imwrite( saveAs, canvas ); } } struct cast { __host__ __device__ unsigned char operator()(float x) { return ((unsigned char)(512*x)); } }; __global__ void computeGridID(SiftGPU::SiftKeypoint *keys, int *gridID1,int *gridID2,int *gridID3,int *gridID4, int num_keys,int gridSize,int numXGrids1,int halfSize,int numXGrids2,int numGrids,int numGridsXOv,int numGridsYOv) { //=========== Function to compute gridID given siftID =========== // Find data location to be processed by current thread int siftID = blockIdx.x*blockDim.x + threadIdx.x; if(siftID < num_keys) { double simpleX = ((double)keys[siftID].x/(double)gridSize); double simpleY = ((double)keys[siftID].y/(double)gridSize); double ov = (double)halfSize/gridSize; gridID1[siftID] = floor(simpleY)*numXGrids1 + floor(simpleX); gridID2[siftID] = floor(simpleY)*numXGrids2 + floor(simpleX - ov); gridID3[siftID] = floor(simpleY - ov)*numXGrids1 + floor(simpleX); gridID4[siftID] = floor(simpleY -ov)*numXGrids2 + floor(simpleX - ov); if(gridID2[siftID] < 0) gridID2[siftID] = 0; if(gridID3[siftID] < 0) gridID3[siftID] = 0; if(gridID4[siftID] < 0) gridID4[siftID] = 0; // gridID corresponding to the given siftID is computed } __syncthreads(); } __global__ void findTopOfBucket(int *d_gridID,int *d_gridID2,int *d_gridID3,int *d_gridID4, int num_elements,int *d_topOfBucket_ptr,int *d_topOfBucket2_ptr,int *d_topOfBucket3_ptr,int *d_topOfBucket4_ptr,int 
numGrids,int numGridsXOv,int numGridsYOv,int numGridsXYOv) { // Find data location to be processed by current thread int tidx = blockIdx.x*blockDim.x + threadIdx.x; if(tidx < num_elements && tidx != 0) { d_topOfBucket_ptr[d_gridID[0]] = 0; d_topOfBucket2_ptr[d_gridID2[0]] = 0; d_topOfBucket3_ptr[d_gridID3[0]] = 0; d_topOfBucket4_ptr[d_gridID4[0]] = 0; if (d_gridID[tidx] != d_gridID[tidx-1]) d_topOfBucket_ptr[d_gridID[tidx]] = tidx; if (d_gridID2[tidx] != d_gridID2[tidx-1]) d_topOfBucket2_ptr[d_gridID2[tidx]] = tidx; if (d_gridID3[tidx] != d_gridID3[tidx-1]) d_topOfBucket3_ptr[d_gridID3[tidx]] = tidx; if (d_gridID4[tidx] != d_gridID4[tidx-1]) d_topOfBucket4_ptr[d_gridID4[tidx]] = tidx; } __syncthreads(); } __global__ void findNumSift(int *gridID,int *topOfBucket,int *gridID2,int *topOfBucket2,int *gridID3,int *topOfBucket3,int *gridID4,int *topOfBucket4,int numGrids,int numGridsXOv,int numGridsYOv,int numGridsXYOv,int *numSift,int *numSift2,int *numSift3,int *numSift4) { //=========== Find number of target sifts in each grid =========== // Find data location to be processed by current thread int gridid = blockIdx.x*blockDim.x + threadIdx.x; int i,numsift = 0; if(gridid < numGrids) { if(topOfBucket[gridid]!=-1) { for(i = topOfBucket[gridid]; gridID[i] == gridid; i++) numsift++; numSift[gridid] = numsift; } else numSift[gridid] = 0; } if(gridid < numGridsXOv) { if(topOfBucket2[gridid]!=-1) { numsift = 0; for(i = topOfBucket2[gridid]; gridID2[i] == gridid; i++) numsift++; numSift2[gridid] = numsift; } else numSift2[gridid] = 0; } if(gridid < numGridsYOv) { if(topOfBucket3[gridid]!=-1) { numsift = 0; for(i = topOfBucket3[gridid]; gridID3[i] == gridid; i++) numsift++; numSift3[gridid] = numsift; } else numSift3[gridid] = 0; } if(gridid < numGridsXYOv) { if(topOfBucket4[gridid]!=-1) { numsift = 0; for(i = topOfBucket4[gridid]; gridID4[i] == gridid; i++) numsift++; numSift4[gridid] = numsift; } else numSift4[gridid] = 0; } __syncthreads(); } __global__ void findEpipolarLine(float *fmatrix,SiftGPU::SiftKeypoint *keys,int source_width, int source_height,int width,int height,long long int *epipolarPoints,int source_num_keys) { //============ Find Epipolar Line for each source sift ============ int source_siftID = blockIdx.x*blockDim.x + threadIdx.x; if(source_siftID < source_num_keys) { float epipolarLine[3]; short x1,y1,x2,y2; // border points of line segment inside the target image float top_x, right_y, bottom_x, left_y ; // points of intersection with the top , right , bottom and left lines of the target image border rectangle float x,y; // To Euclidian Coordinate system x = keys[source_siftID].x - ((source_width-1)/2); y = ((source_height-1)/2) - keys[source_siftID].y; // E = F.x epipolarLine[0] = (fmatrix[0]*x) + (fmatrix[1]*y) + fmatrix[2]; epipolarLine[1] = (fmatrix[3]*x) + (fmatrix[4]*y) + fmatrix[5]; epipolarLine[2] = (fmatrix[6]*x) + (fmatrix[7]*y) + fmatrix[8]; //===================================================================== // // In Euclidian Coordinate system (centrailized), // line equations for target image rectangle: // --------------------------------------------------------- // y=(h-1)/2 (top) // x=(w-1)/2 (right) // y=-(h-1)/2 (bottom) // x=-(w-1)/2 (left) // // --------------------------------------------------------- // // So points of intersection with epipolar line ( ax + by + c = 0 ) are : // --------------------------------------------------------- // x = (-b(h-1)/2 -c )/a, y = (h-1)/2 // x = (w-1)/2, y = (-c-(w-1)a/2)/b // x = (-c+(h-1)b/2)/a , y = -(h-1)/2 // x = 
-(w-1)/2 , y =( -c+(w-1)a/2)/b // //===================================================================== //================== Find points of intersection ====================== top_x = ((-1*epipolarLine[1]*(height-1)/2)-(1*epipolarLine[2]))/epipolarLine[0] ; right_y =((-1*epipolarLine[2])-((width-1)*epipolarLine[0]/2))/epipolarLine[1]; bottom_x =((-1*epipolarLine[2])+((height-1)*epipolarLine[1]/2))/epipolarLine[0]; left_y =((-1*epipolarLine[2])+((width-1)*epipolarLine[0]/2))/epipolarLine[1]; //===================================================================== // Now these points (top , bottom ,left, right) are in Euclidian Coordinate // system ((0,0) at the centre of the image) // Back to graphics coordinate system ((0,0) at the top left corner of // image and downwards is +ve y) top_x = top_x + ((width-1)/2); bottom_x = bottom_x + ((width-1)/2); right_y = ((height-1)/2) - right_y; left_y = ((height-1)/2) - left_y; //===================================================================== //===================================================================== // // -> Now only two of these points will lie on the target image border rectangle // -> They are (x1,y1) and (x2,y2) // -> Note : x1 < x2 // //===================================================================== x1 = x2 = y1 = y2 = 0; //====================== Find x1, x2, y1, y2 ========================== if(left_y >= 0 && left_y <= height-1) { if(top_x >= 0 && top_x <= width-1) { x1 = 0; y1 = (short)left_y; x2 = (short)top_x; y2 = 0; } else if(bottom_x >= 0 && bottom_x <= width-1) { x1 = 0; y1 = (short)left_y; x2 = (short)bottom_x; y2 = height-1; } else if(right_y >= 0 && right_y <= height-1) { x1 = 0; y1 = (short)left_y; x2 = width-1; y2 = (short)right_y; } } else if(top_x >= 0 && top_x <= width-1) { if(right_y >= 0 && right_y <= height-1) { x1 = (short)top_x; y1 = 0; x2 = width-1; y2 = (short)right_y; } else if(bottom_x >= 0 && bottom_x <= width-1) { if(top_x < bottom_x) { x1 = (short)top_x; y1 = 0; x2 = (short)bottom_x; y2 = height-1; } else { x2 = (short)top_x; y2 = height-1; x1 = (short)bottom_x; y1 = 0; } } } else if(bottom_x >= 0 && bottom_x <= width-1) { if(right_y >= 0 && right_y <= height-1) { x1 = (short)bottom_x; y1 = height-1; x2 = width-1; y2 = (short)right_y; } } //===================================================================== //======= Packing all these points in a single long long int ========== long long int l = 0; l = l | (long long int)x1 << 48; l = l | (long long int)y1 << 32; l = l | (long long int)x2 << 16; l = l | (long long int)y2 << 0; epipolarPoints[source_siftID] = l; //===================================================================== } __syncthreads(); // Done ! } __global__ void clusterLines(long long int *d_epipolarPoints,int *cluster,int source_num_keys) { //===================================================================== // // d_epipolarPoints is a sorted array of packed points through which // we extract the end points of an epipolar line. If there is a // significant difference between two adjacent end points (say previous // and current), we mark the current one as 1 else 0 and store it in // cluster[current]. By default cluster[0] = 0. 
In this way all the // points between 1 to the next 1 fall in one group // //===================================================================== int id = blockIdx.x*blockDim.x + threadIdx.x; // id = 0 : first entry handled on the cpu cluster[0] = 0 if( id < source_num_keys && id > 0) { unsigned long long int s = 0,l; short x1,y1,x2,y2; short prev_x1,prev_y1,prev_x2,prev_y2; s = ~s; s = s >> 48; // Extract the points from the packed point l = d_epipolarPoints[id]; x1 = (short)(s & (l >> 48)); y1 = (short)(s & (l >> 32)); x2 = (short)(s & (l >> 16)); y2 = (short)(s & (l)); // Extract the points from the packed point l = d_epipolarPoints[id-1]; prev_x1 = (short)(s & (l >> 48)); prev_y1 = (short)(s & (l >> 32)); prev_x2 = (short)(s & (l >> 16)); prev_y2 = (short)(s & (l)); if(((x1-prev_x1)*(x1-prev_x1) < 4) && ((y1-prev_y1)*(y1-prev_y1)< 4) && ((x2-prev_x2)*(x2-prev_x2) < 4) && ((y2-prev_y2)*(y2-prev_y2) < 4)) // If no significant difference between current and previous { cluster[id] = 0; } else { cluster[id] = 1; } } __syncthreads(); // Done ! } __global__ void findClusterLocation(int *cluster, int source_num_keys,int *clusterLocation,int *source_siftID,int *clusterID) { //=================================================================== // // Current scene example // +-----------+---+---+---+---+---+---+---+---+----- // | tidx | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | ... // +-----------+---+---+---+---+---+---+---+---+----- // | siftid | a | b | c | d | e | f | g | h |... // +-----------+---+---+---+---+---+---+---+---+----- // | cluster | 0 | 1 | 1 | 2 | 3 | 3 | 3 | 4 |... // +-----------+---+---+---+---+---+---+---+---+----- // // So we want cluster location to be like this so that it // is easy to find out members of one cluster // +------------------+---+---+---+---+---+-------- // | cluster_id | 0 | 1 | 2 | 3 | 4 | ... // +------------------+---+---+---+---+---+-------- // | cluster_location | 0 | 1 | 3 | 4 | 7 | ... 
// +------------------+---+---+---+---+---+-------- // //=================================================================== // tidx = 0 handled on the CPU int tidx = blockIdx.x*blockDim.x + threadIdx.x; if(tidx < source_num_keys) { if( tidx != 0) { if (cluster[tidx] != cluster[tidx-1]) clusterLocation[cluster[tidx]] = tidx; } clusterID[source_siftID[tidx]] = cluster[tidx]; } __syncthreads(); } __device__ void getBestGrid(float x, float y, int gridSize,int halfSize,int numXGrids1,int numXGrids2,int numGridsXOv,int numGridsYOv,int *gridid, int *which,int numGrids) { int idx[4] = {0,0,0,0}; float dists[4] = {0,0,0,0}; double simpleX = ((double)x/gridSize); double simpleY = ((double)y/gridSize); double ov = (double)halfSize/gridSize; idx[0] = floor(simpleY)*numXGrids1 + floor(simpleX); idx[1] = floor(simpleY)*numXGrids2 + floor(simpleX - ov); idx[2] = floor(simpleY - ov)*numXGrids1 + floor(simpleX); idx[3] = floor(simpleY -ov)*numXGrids2 + floor(simpleX - ov); if(idx[1] < 0) idx[1] = 0; if(idx[2] < 0) idx[2] = 0; if(idx[3] < 0) idx[3] = 0; float g1Xc = (floor(simpleX))*gridSize + (float)halfSize; float g1Yc = (floor(simpleY))*gridSize + (float)halfSize; float g2Xc = (floor(simpleX - ov))*gridSize +(float) 2*halfSize; //float g2Yc = g1Yc; //float g3Xc = g1Xc; float g3Yc = (floor(simpleY - ov))*gridSize + (float)2*halfSize; //float g4Xc = g2Xc; //float g4Yc = g3Yc; float x1_d = (g1Xc - x)*(g1Xc - x); float y1_d = (g1Yc - y)*(g1Yc - y); float x2_d = (g2Xc - x)*(g2Xc - x); float y2_d = (g3Yc - y)*(g3Yc - y); dists[0] = x1_d + y1_d; dists[1] = x2_d + y1_d; dists[2] = x1_d + y2_d; dists[3] = x2_d + y2_d; float min_g_dist = 200000; int minIdx = -1; int idno = 0; for(int id=0; id < 4; id++) { if(dists[id] < min_g_dist && idx[id] >= 0) { idno = id+1; min_g_dist = dists[id]; minIdx = idx[id]; } } *gridid = minIdx; *which = idno; // *gridid = idx[0]; // *which = 1; } __global__ void findNumPotentialMatchesForEachCluster(int *gridID,int *target_siftID,int *topOfBucket,int *gridID2,int *target_siftID2,int *topOfBucket2,int *gridID3,int *target_siftID3,int *topOfBucket3,int *gridID4,int *target_siftID4,int *topOfBucket4,int gridSize,int *clusterLocation,long long int *epipolarPoints,int *numSift,int *numSift2,int *numSift3,int *numSift4,int target_height,int numXGrids1,int halfSize,int numXGrids2,int numGridsXOv,int numGridsYOv,int numGrids,int numGridsXYOv,int *numPotentialMatches) { //===================================================================== // // For each cluster, load the epipolar line and mark eqidistant points // on that line. Now find the respective grids of these points. // After knowing which all grids are a part of the epipolar line, The // potential matches are just the target sifts in those grids. 
// //===================================================================== int clusterID = blockIdx.x; int threadID = threadIdx.x; int clusterlocation = clusterLocation[clusterID]; //potentialMatches[maxNumPotentialMatches*clusterID] = 0; extern __shared__ int shared_array[]; int i,j,k; int distance, d, num_of_points, num_of_pts_per_thread; float cos_theta,sin_theta,x,y,slope; unsigned long long int s = 0,l; short x1,y1,x2,y2; s = ~s; s = s >> 48; l = epipolarPoints[clusterlocation]; // Extract the points from the packed point l x1 = (short)(s & (l >> 48)); y1 = (short)(s & (l >> 32)); x2 = (short)(s & (l >> 16)); y2 = (short)(s & (l)); distance = (int) sqrt((float)((x1-x2)*(x1-x2)) + ((y1-y2)*(y1-y2))); d = gridSize ; // equidistant points at d distance num_of_points = ceil((float)distance/gridSize); if(distance > 5000) { num_of_points = (5000/gridSize); d = distance/num_of_points; } num_of_pts_per_thread = ceil((float)num_of_points/blockDim.x); if((x2-x1) != 0) // slope is not infinity { slope = (float)(y2-y1)/(x2-x1); cos_theta = 1/sqrt(1+(slope*slope)); sin_theta = slope/sqrt(1+(slope*slope)); } else // slope is infinite : vertical epipolar line { cos_theta = 0; sin_theta = 1; } // Compute gridIDs for num_of_pts_per_thread //========================================================================== // // Size of the following array = 10 // Reason : I assume that num_of_pts_per_thread can be 10 at max // if so then num_of_pts = 256(block_size)x10 = 2560 // assumption : min gridSize = 2 (4 actually :P ) // so length = 2x2560 = 5120 (we have already assumed max length to be 5000) // therefore no problem with this assumption :D // //========================================================================== int totalNumsift,top; int grids[10] = {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}; int which[10] = {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}; //int gridx,gridy,grid_id; int grid_id; totalNumsift = 0; top = -1; int which_grid = 0; for(i = 0 ; i < num_of_pts_per_thread ; i++ ) { // threadID*num_of_pts_per_thread th point x = (float)(x1+(((threadID*num_of_pts_per_thread)+i)*cos_theta*d)); y = (float)(y1+(((threadID*num_of_pts_per_thread)+i)*sin_theta*d)); if(top == 10) break; if( x >= (float)x1 && x <= (float)x2 && y <= (float)target_height-1 && y >= 0.0) // the point lies in the rectangle { // get best grid id // Compute gridID of the point getBestGrid(x, y,gridSize, halfSize,numXGrids1, numXGrids2, numGridsXOv,numGridsYOv,&grid_id,&which_grid,numGrids); if(top == -1) { if(which_grid == 1) { totalNumsift += numSift[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 2) { totalNumsift += numSift2[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 3) { totalNumsift += numSift3[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 4) { totalNumsift += numSift4[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } } else if(grid_id!=grids[top]) { if(which_grid == 1) { totalNumsift += numSift[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 2) { totalNumsift += numSift2[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 3) { totalNumsift += numSift3[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 4) { totalNumsift += numSift4[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } } } } shared_array[threadID] = totalNumsift; __syncthreads(); 
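//==========================================================================
// The upsweep/downsweep below is an inclusive prefix sum (scan) over the
// per-thread counts in shared_array[0..255] (the code assumes blockDim.x ==
// 256). The upsweep writes its partial sums into the upper half of shared
// memory (indices 256..510, hence the 2*threadsPerBlock*sizeof(int) of
// dynamic shared memory at launch), and the downsweep rewrites
// shared_array[0..255] in place as running totals. Tiny example with four
// counts:
//      counts        : 3 1 4 2
//      after the scan: 3 4 8 10
// So shared_array[255] ends up holding this cluster's total number of
// potential matches; the companion kernel findPotentialMatchesForEachCluster
// reuses the same scan to give each thread its write offset
// (shared_array[threadID-1] + 1).
//==========================================================================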
//========================================================================= // Find the number of target sifts each thread has to process // and write it into shared memory // to make sure that all the threads have written their num of sifts in the shared memory // Now we need to compute the prefix sum of this shared array which has 256 elements as of now // to know the position to write in the global memory for each thread //=========================== Prefix Sum ==================================== //================================ Upsweep ================================== if(threadID < 128) shared_array[256+threadID] = shared_array[2*threadID]+shared_array[(2*threadID)+1]; __syncthreads(); if(threadID < 64) shared_array[256+128+threadID] = shared_array[256+(2*threadID)]+shared_array[256+(2*threadID)+1]; __syncthreads(); if(threadID < 32) shared_array[256+128+64+threadID] = shared_array[256+128+(2*threadID)]+shared_array[256+128+(2*threadID)+1]; __syncthreads(); if(threadID < 16) shared_array[256+128+64+32+threadID] = shared_array[256+128+64+(2*threadID)]+shared_array[256+128+64+(2*threadID)+1]; __syncthreads(); if(threadID < 8) shared_array[256+128+64+32+16+threadID] = shared_array[256+128+64+32+(2*threadID)]+shared_array[256+128+64+32+(2*threadID)+1]; __syncthreads(); if(threadID < 4) shared_array[256+128+64+32+16+8+threadID] = shared_array[256+128+64+32+16+(2*threadID)]+shared_array[256+128+64+32+16+(2*threadID)+1]; __syncthreads(); if(threadID < 2) shared_array[256+128+64+32+16+8+4+threadID] = shared_array[256+128+64+32+16+8+(2*threadID)]+shared_array[256+128+64+32+16+8+(2*threadID)+1]; __syncthreads(); if(threadID < 1) shared_array[256+128+64+32+16+8+4+2+threadID] = shared_array[256+128+64+32+16+8+4+(2*threadID)]+shared_array[256+128+64+32+16+8+4+(2*threadID)+1]; __syncthreads(); //======================== Downsweep ============================== if(threadID < 2 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+16+8+4+threadID] = shared_array[256+128+64+32+16+8+4+2+(threadID/2)]; else shared_array[256+128+64+32+16+8+4+threadID] += shared_array[256+128+64+32+16+8+4+2+(threadID/2)-1]; } __syncthreads(); if(threadID < 4 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+16+8+threadID] = shared_array[256+128+64+32+16+8+4+(threadID/2)]; else shared_array[256+128+64+32+16+8+threadID] += shared_array[256+128+64+32+16+8+4+(threadID/2)-1]; } __syncthreads(); if(threadID < 8 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+16+threadID] = shared_array[256+128+64+32+16+8+(threadID/2)]; else shared_array[256+128+64+32+16+threadID] += shared_array[256+128+64+32+16+8+(threadID/2)-1]; } __syncthreads(); if(threadID < 16 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+threadID] = shared_array[256+128+64+32+16+(threadID/2)]; else shared_array[256+128+64+32+threadID] += shared_array[256+128+64+32+16+(threadID/2)-1]; } __syncthreads(); if(threadID < 32 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+threadID] = shared_array[256+128+64+32+(threadID/2)]; else shared_array[256+128+64+threadID] += shared_array[256+128+64+32+(threadID/2)-1]; } __syncthreads(); if(threadID < 64 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+threadID] = shared_array[256+128+64+(threadID/2)]; else shared_array[256+128+threadID] += shared_array[256+128+64+(threadID/2)-1]; } __syncthreads(); if(threadID < 128 && threadID!=0) { if(threadID%2 != 0) shared_array[256+threadID] = shared_array[256+128+(threadID/2)]; else 
shared_array[256+threadID] += shared_array[256+128+(threadID/2)-1]; } __syncthreads(); if(threadID < 256 && threadID!=0) { if(threadID%2 != 0) shared_array[threadID] = shared_array[256+(threadID/2)]; else shared_array[threadID] += shared_array[256+(threadID/2)-1]; } __syncthreads(); //========================================================================== // // Now shared array[0..255] contains prefix sum // There is an array of size maxNumPotentialMatches for each cluster, we will write the potential matches there // The 1st entry would be the total number of potential matches and // then each thread will write the matching sift ids it found at write position // //========================================================================== if(threadID == 0) { // printf("%d \n",shared_array[255]); numPotentialMatches[clusterID] = shared_array[255]; } __syncthreads(); } __global__ void findPotentialMatchesForEachCluster(int *gridID,int *target_siftID,int *topOfBucket,int *gridID2,int *target_siftID2,int *topOfBucket2,int *gridID3,int *target_siftID3,int *topOfBucket3,int *gridID4,int *target_siftID4,int *topOfBucket4,int gridSize,int *clusterLocation,long long int *epipolarPoints,int *numSift,int *numSift2,int *numSift3,int *numSift4,int *potentialMatches,int target_height,int numXGrids1,int halfSize,int numXGrids2,int numGridsXOv,int numGridsYOv,int numGrids,int numGridsXYOv,int maxNumPotentialMatches) { //===================================================================== // // For each cluster, load the epipolar line and mark eqidistant points // on that line. Now find the respective grids of these points. // After knowing which all grids are a part of the epipolar line, The // potential matches are just the target sifts in those grids. // //===================================================================== int clusterID = blockIdx.x; int threadID = threadIdx.x; int clusterlocation = clusterLocation[clusterID]; potentialMatches[(2+maxNumPotentialMatches)*clusterID] = 0; extern __shared__ int shared_array[]; int i,j,k; int distance, d, num_of_points, num_of_pts_per_thread; float cos_theta,sin_theta,x,y,slope; unsigned long long int s = 0,l; short x1,y1,x2,y2; s = ~s; s = s >> 48; l = epipolarPoints[clusterlocation]; // Extract the points from the packed point l x1 = (short)(s & (l >> 48)); y1 = (short)(s & (l >> 32)); x2 = (short)(s & (l >> 16)); y2 = (short)(s & (l)); distance = (int) sqrt((float)((x1-x2)*(x1-x2)) + ((y1-y2)*(y1-y2))); d = gridSize ; // equidistant points at d distance num_of_points = ceil((float)distance/gridSize); if(distance > 5000) { num_of_points = (5000/gridSize); d = distance/num_of_points; } num_of_pts_per_thread = ceil((float)num_of_points/blockDim.x); if((x2-x1) != 0) // slope is not infinity { slope = (float)(y2-y1)/(x2-x1); cos_theta = 1/sqrt(1+(slope*slope)); sin_theta = slope/sqrt(1+(slope*slope)); } else // slope is infinite : vertical epipolar line { cos_theta = 0; sin_theta = 1; } // Compute gridIDs for num_of_pts_per_thread //========================================================================== // // Size of the following array = 10 // Reason : I assume that num_of_pts_per_thread can be 10 at max // if so then num_of_pts = 256(block_size)x10 = 2560 // assumption : min gridSize = 2 (4 actually :P ) // so length = 2x2560 = 5120 (we have already assumed max length to be 5000) // therefore no problem with this assumption :D // //========================================================================== int totalNumsift,top; int grids[10] = 
{-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}; int which[10] = {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}; //int gridx,gridy,grid_id; int grid_id; totalNumsift = 0; top = -1; int which_grid = 0; for(i = 0 ; i < num_of_pts_per_thread ; i++ ) { // threadID*num_of_pts_per_thread th point x = (float)(x1+(((threadID*num_of_pts_per_thread)+i)*cos_theta*d)); y = (float)(y1+(((threadID*num_of_pts_per_thread)+i)*sin_theta*d)); if(top == 10) break; if( x >= (float)x1 && x <= (float)x2 && y <= (float)target_height-1 && y >= 0.0) // the point lies in the rectangle { // get best grid id // Compute gridID of the point getBestGrid(x, y,gridSize, halfSize,numXGrids1, numXGrids2, numGridsXOv,numGridsYOv,&grid_id,&which_grid,numGrids); if(top == -1) { if(which_grid == 1) { totalNumsift += numSift[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 2) { totalNumsift += numSift2[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 3) { totalNumsift += numSift3[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 4) { totalNumsift += numSift4[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } } else if(grid_id!=grids[top]) { if(which_grid == 1) { totalNumsift += numSift[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 2) { totalNumsift += numSift2[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 3) { totalNumsift += numSift3[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } else if(which_grid == 4) { totalNumsift += numSift4[grid_id]; grids[++top] = grid_id; which[top] = which_grid; } } } } shared_array[threadID] = totalNumsift; __syncthreads(); //========================================================================= // Find the number of target sifts each thread has to process // and write it into shared memory // to make sure that all the threads have written their num of sifts in the shared memory // Now we need to compute the prefix sum of this shared array which has 256 elements as of now // to know the position to write in the global memory for each thread //=========================== Prefix Sum ==================================== //================================ Upsweep ================================== if(threadID < 128) shared_array[256+threadID] = shared_array[2*threadID]+shared_array[(2*threadID)+1]; __syncthreads(); if(threadID < 64) shared_array[256+128+threadID] = shared_array[256+(2*threadID)]+shared_array[256+(2*threadID)+1]; __syncthreads(); if(threadID < 32) shared_array[256+128+64+threadID] = shared_array[256+128+(2*threadID)]+shared_array[256+128+(2*threadID)+1]; __syncthreads(); if(threadID < 16) shared_array[256+128+64+32+threadID] = shared_array[256+128+64+(2*threadID)]+shared_array[256+128+64+(2*threadID)+1]; __syncthreads(); if(threadID < 8) shared_array[256+128+64+32+16+threadID] = shared_array[256+128+64+32+(2*threadID)]+shared_array[256+128+64+32+(2*threadID)+1]; __syncthreads(); if(threadID < 4) shared_array[256+128+64+32+16+8+threadID] = shared_array[256+128+64+32+16+(2*threadID)]+shared_array[256+128+64+32+16+(2*threadID)+1]; __syncthreads(); if(threadID < 2) shared_array[256+128+64+32+16+8+4+threadID] = shared_array[256+128+64+32+16+8+(2*threadID)]+shared_array[256+128+64+32+16+8+(2*threadID)+1]; __syncthreads(); if(threadID < 1) shared_array[256+128+64+32+16+8+4+2+threadID] = shared_array[256+128+64+32+16+8+4+(2*threadID)]+shared_array[256+128+64+32+16+8+4+(2*threadID)+1]; __syncthreads(); //======================== 
Downsweep ============================== if(threadID < 2 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+16+8+4+threadID] = shared_array[256+128+64+32+16+8+4+2+(threadID/2)]; else shared_array[256+128+64+32+16+8+4+threadID] += shared_array[256+128+64+32+16+8+4+2+(threadID/2)-1]; } __syncthreads(); if(threadID < 4 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+16+8+threadID] = shared_array[256+128+64+32+16+8+4+(threadID/2)]; else shared_array[256+128+64+32+16+8+threadID] += shared_array[256+128+64+32+16+8+4+(threadID/2)-1]; } __syncthreads(); if(threadID < 8 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+16+threadID] = shared_array[256+128+64+32+16+8+(threadID/2)]; else shared_array[256+128+64+32+16+threadID] += shared_array[256+128+64+32+16+8+(threadID/2)-1]; } __syncthreads(); if(threadID < 16 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+32+threadID] = shared_array[256+128+64+32+16+(threadID/2)]; else shared_array[256+128+64+32+threadID] += shared_array[256+128+64+32+16+(threadID/2)-1]; } __syncthreads(); if(threadID < 32 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+64+threadID] = shared_array[256+128+64+32+(threadID/2)]; else shared_array[256+128+64+threadID] += shared_array[256+128+64+32+(threadID/2)-1]; } __syncthreads(); if(threadID < 64 && threadID!=0) { if(threadID%2 != 0) shared_array[256+128+threadID] = shared_array[256+128+64+(threadID/2)]; else shared_array[256+128+threadID] += shared_array[256+128+64+(threadID/2)-1]; } __syncthreads(); if(threadID < 128 && threadID!=0) { if(threadID%2 != 0) shared_array[256+threadID] = shared_array[256+128+(threadID/2)]; else shared_array[256+threadID] += shared_array[256+128+(threadID/2)-1]; } __syncthreads(); if(threadID < 256 && threadID!=0) { if(threadID%2 != 0) shared_array[threadID] = shared_array[256+(threadID/2)]; else shared_array[threadID] += shared_array[256+(threadID/2)-1]; } __syncthreads(); //========================================================================== // // Now shared array[0..255] contains prefix sum // There is an array of size maxNumPotentialMatches for each cluster, we will write the potential matches there // The 1st entry would be the total number of potential matches and // then each thread will write the matching sift ids it found at write position // //========================================================================== potentialMatches[(2+maxNumPotentialMatches)*clusterID] = shared_array[255]; // total num of potential matches int writePosition = 1; if(threadID > 0) writePosition = shared_array[threadID-1] + 1; __syncthreads(); k = 0; for(i=0;i<=top;i++) { if(which[i] == 1) { if(topOfBucket[grids[i]]!=-1) { for(j = topOfBucket[grids[i]]; grids[i] == gridID[j]; j++) { potentialMatches[((2+maxNumPotentialMatches)*clusterID)+writePosition+k] = target_siftID[j]; k++; } } } else if(which[i] == 2) { if(topOfBucket2[grids[i]]!=-1) { for(j = topOfBucket2[grids[i]]; grids[i] == gridID2[j]; j++) { potentialMatches[((2+maxNumPotentialMatches)*clusterID)+writePosition+k] = target_siftID2[j]; k++; } } } else if(which[i] == 3) { if(topOfBucket3[grids[i]]!=-1) { for(j = topOfBucket3[grids[i]]; grids[i] == gridID3[j]; j++) { potentialMatches[((2+maxNumPotentialMatches)*clusterID)+writePosition+k] = target_siftID3[j]; k++; } } } else if(which[i] == 4) { if(topOfBucket4[grids[i]]!=-1) { for(j = topOfBucket4[grids[i]]; grids[i] == gridID4[j]; j++) { potentialMatches[((2+maxNumPotentialMatches)*clusterID)+writePosition+k] = 
target_siftID4[j]; k++; } } } } __syncthreads(); } __device__ void findmin1min2(int a, int b , int c, int d,int a_id,int b_id,int c_id,int d_id,int *min1,int *min2,int *min1_id,int *min2_id) { //======================================================================== // This chunk of code finds the minimum and next minimun and correnponding // minimum and next minimum ID between a,b,c,d //======================================================================== if( a < b && a < c && a < d ) { *min1 = a; *min1_id = a_id; if(b < c && b < d) { *min2 = b; *min2_id = b_id; } else if(c < d) { *min2 = c; *min2_id = c_id; } else { *min2 = d; *min2_id = d_id; } } else if(b < c && b < d) { *min1 = b; *min1_id = b_id; if(a < c && a < d) { *min2 = a; *min2_id = a_id; } else if(c < d) { *min2 = c; *min2_id = c_id; } else { *min2 = d; *min2_id = d_id; } } else if(c < d) { *min1 = c; *min1_id = c_id; if(a < b && a < d) { *min2 = a; *min2_id = a_id; } else if(b < d) { *min2 = b; *min2_id = b_id; } else { *min2 = d; *min2_id = d_id; } } else { *min1 = d; *min1_id = d_id; if(a < b && a < c) { *min2 = a; *min2_id = a_id; } else if(b < c) { *min2 = b; *min2_id = b_id; } else { *min2 = c; *min2_id = c_id; } } } __global__ void findMatches(int *matches,int *clusterID,int *potentialMatches,int maxNumPotentialMatches) { //======================================================================== // // For each source sift, find the cluster it belongs to // Then load the potential matching target sifts of that cluster and // compute the L2 distance between the descriptors. // Now find the min and next min of these distances. // Declare a match based on some tests. // //======================================================================== int source_siftID,threadID,cluster_id,num_potential_matches,num_pts_per_thread,target_siftID; int min_id,next_min_id,i,j; int min_num,next_min,dist; extern __shared__ int shared_array[]; source_siftID = blockIdx.x; threadID = threadIdx.x; cluster_id = clusterID[source_siftID]; num_potential_matches = potentialMatches[(2+maxNumPotentialMatches)*cluster_id]; // matches[source_siftID] = -1; if (num_potential_matches%blockDim.x == 0) num_pts_per_thread = num_potential_matches/blockDim.x; else num_pts_per_thread = 1 + (num_potential_matches/blockDim.x); // if(num_potential_matches > maxNumPotentialMatches) // printf("%d\n",num_potential_matches); min_num = 999999; next_min = 999999; min_id = -1; next_min_id = -1; int dummy; int a,b; for(i = 0;i < num_pts_per_thread; i++) { if((num_pts_per_thread*threadID)+i < num_potential_matches) { target_siftID = potentialMatches[((2+maxNumPotentialMatches)*cluster_id)+1+(num_pts_per_thread*threadID)+i]; dist = 0; for(j = 0 ; j < 128; j++) { a = tex1Dfetch(tex_targetDescriptor,(target_siftID*128)+j); b = tex1Dfetch(tex_sourceDescriptor,(source_siftID*128)+j); dummy = a-b; dist+=(dummy*dummy); } // for( j = 0 ; j < 128 ; j++) // { // dist+= (((int)tex1Dfetch(tex_sourceDescriptor,(source_siftID*128)+j) - ((int)tex1Dfetch(tex_targetDescriptor,(target_siftID*128)+j)))*((int)tex1Dfetch(tex_sourceDescriptor,(source_siftID*128)+j) - ((int)tex1Dfetch(tex_targetDescriptor,(target_siftID*128)+j)))); // } if(dist <= min_num) { next_min = min_num; next_min_id = min_id; min_num = dist; min_id = target_siftID; } else if(dist < next_min) { next_min = dist; next_min_id = target_siftID; } } } // Store the min and next min found by each thread into the shared memory shared_array[threadID] = min_num; shared_array[threadID+blockDim.x] = next_min; 
shared_array[threadID+2*blockDim.x] = min_id; shared_array[threadID+3*blockDim.x] = next_min_id; __syncthreads(); // Shared array fully occupied // Time to compute the min and next min across the block ( min and next min in the shared memory ) int c,d; int a_id,b_id,c_id,d_id; int min1,min2; int min1_id,min2_id; if(threadID < 128) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+128]; d = shared_array[threadID+blockDim.x+128]; c_id = shared_array[threadID+2*blockDim.x+128]; d_id = shared_array[threadID+3*blockDim.x+128]; findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); shared_array[threadID] = min1; shared_array[threadID+blockDim.x] = min2; shared_array[threadID+2*blockDim.x] = min1_id; shared_array[threadID+3*blockDim.x] = min2_id; } __syncthreads(); if(threadID < 64) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+64]; d = shared_array[threadID+blockDim.x+64]; c_id = shared_array[threadID+2*blockDim.x+64]; d_id = shared_array[threadID+3*blockDim.x+64]; findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); shared_array[threadID] = min1; shared_array[threadID+blockDim.x] = min2; shared_array[threadID+2*blockDim.x] = min1_id; shared_array[threadID+3*blockDim.x] = min2_id; } __syncthreads(); if(threadID < 32) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+32]; d = shared_array[threadID+blockDim.x+32]; c_id = shared_array[threadID+2*blockDim.x+32]; d_id = shared_array[threadID+3*blockDim.x+32]; findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); shared_array[threadID] = min1; shared_array[threadID+blockDim.x] = min2; shared_array[threadID+2*blockDim.x] = min1_id; shared_array[threadID+3*blockDim.x] = min2_id; } __syncthreads(); if(threadID < 16) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+16]; d = shared_array[threadID+blockDim.x+16]; c_id = shared_array[threadID+2*blockDim.x+16]; d_id = shared_array[threadID+3*blockDim.x+16]; findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); shared_array[threadID] = min1; shared_array[threadID+blockDim.x] = min2; shared_array[threadID+2*blockDim.x] = min1_id; shared_array[threadID+3*blockDim.x] = min2_id; } __syncthreads(); if(threadID < 8) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+8]; d = shared_array[threadID+blockDim.x+8]; c_id = shared_array[threadID+2*blockDim.x+8]; d_id = shared_array[threadID+3*blockDim.x+8]; findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); shared_array[threadID] = min1; shared_array[threadID+blockDim.x] = min2; shared_array[threadID+2*blockDim.x] = min1_id; shared_array[threadID+3*blockDim.x] = min2_id; } __syncthreads(); if(threadID < 4) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+4]; d = 
shared_array[threadID+blockDim.x+4]; c_id = shared_array[threadID+2*blockDim.x+4]; d_id = shared_array[threadID+3*blockDim.x+4]; findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); shared_array[threadID] = min1; shared_array[threadID+blockDim.x] = min2; shared_array[threadID+2*blockDim.x] = min1_id; shared_array[threadID+3*blockDim.x] = min2_id; } __syncthreads(); if(threadID < 2) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+2]; d = shared_array[threadID+blockDim.x+2]; c_id = shared_array[threadID+2*blockDim.x+2]; d_id = shared_array[threadID+3*blockDim.x+2]; findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); shared_array[threadID] = min1; shared_array[threadID+blockDim.x] = min2; shared_array[threadID+2*blockDim.x] = min1_id; shared_array[threadID+3*blockDim.x] = min2_id; } __syncthreads(); if(threadID == 0) { a = shared_array[threadID]; b = shared_array[threadID+blockDim.x]; a_id = shared_array[threadID+2*blockDim.x]; b_id = shared_array[threadID+3*blockDim.x]; c = shared_array[threadID+1]; d = shared_array[threadID+blockDim.x+1]; c_id = shared_array[threadID+2*blockDim.x+1]; d_id = shared_array[threadID+3*blockDim.x+1]; // Compute min and next min findmin1min2(a,b,c,d,a_id,b_id,c_id,d_id,&min1,&min2,&min1_id,&min2_id); // Ratio test if((float)min1/min2 < 0.36 && min1_id > 0 && num_potential_matches > 9 && min2_id != 999999) // we store the squared distances so the ratio becomes 0.36 instead of 0.6 { // Declare match matches[source_siftID] = min1_id; } } __syncthreads(); } int max(int a,int b,int c,int d) { int maxim = -9999; if(maxim < a) maxim = a; if(maxim < b) maxim = b; if(maxim < c) maxim = c; if(maxim < d) maxim = d; return maxim; } double Matcher::matchImagePair(sift_img source,sift_img target,float *f_matrix,SiftGPU::SiftKeypoint* &d_keys1,unsigned char* &descriptors1 ,SiftGPU::SiftKeypoint* &d_keys2,unsigned char* &descriptors2 ) { struct timespec t1, t2, t3, t4, t5; clock_gettime(CLOCK_MONOTONIC, &t3); clock_gettime(CLOCK_MONOTONIC, &t1); //================== GPU initialization ======================= cudaSetDevice(0); cudaFree(0); //============================================================= clock_gettime(CLOCK_MONOTONIC, &t2); double time_gpu_init = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================= //============================================================= clock_gettime(CLOCK_MONOTONIC, &t1); //================== Declare variables ======================== int i,j; int *h_cluster, *h_gridID; long long int *h_epipolarPoints; // Stores ends of the epipolar line segment packed in a single long long int h_epipolarPoints = (long long int*) malloc(sizeof(long long int)*source.num_keys); h_cluster = (int *) malloc(sizeof(int)*source.num_keys); h_gridID = (int *) malloc(sizeof(int)*target.num_keys); //to store gridIDs on host clock_gettime(CLOCK_MONOTONIC, &t2); double time_total_cpu_alloc = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); clock_gettime(CLOCK_MONOTONIC, &t1); //==================== Declare device arrays =================== clock_gettime(CLOCK_MONOTONIC, &t5); // float *d_target_x, *d_target_y, *d_source_x, *d_source_y; // SiftGPU::SiftKeypoint *d_keys1, *d_keys2; float *d_fmatrix; // float *desc1,*desc2; int *d_gridID , *d_target_siftID , *d_gridID1 , 
*d_target_siftID1, *d_gridID2 , *d_target_siftID2,*d_gridID3 , *d_target_siftID3,*d_gridID4 , *d_target_siftID4, *d_source_siftID, *d_cluster, *d_clusterID; //// unsigned char *d_source_keypoints,*d_target_keypoints; long long int *d_epipolarPoints; //============================================================== //=================== Allocate memory on device ================ // cudaMalloc((void**)&d_keys1, sizeof(SiftGPU::SiftKeypoint)*source.num_keys); // cudaMalloc((void**)&d_keys2, sizeof(SiftGPU::SiftKeypoint)*target.num_keys); // cudaMalloc((void**)&desc1, 128*sizeof(float)*source.num_keys); // cudaMalloc((void**)&desc2, 128*sizeof(float)*target.num_keys); // cudaMalloc((void**)&d_source_x, sizeof(float)*source.num_keys); // cudaMalloc((void**)&d_source_y, sizeof(float)*source.num_keys); // cudaMalloc((void**)&d_target_x, sizeof(float)*target.num_keys); // cudaMalloc((void**)&d_target_y, sizeof(float)*target.num_keys); //// cudaMalloc((void**)&d_source_keypoints, sizeof(unsigned char)*128*source.num_keys); //// cudaMalloc((void**)&d_target_keypoints, sizeof(unsigned char)*128*target.num_keys); cudaMalloc((void**)&d_source_siftID, sizeof(int)*source.num_keys); cudaMalloc((void**)&d_target_siftID, sizeof(int)*target.num_keys); cudaMalloc((void**)&d_target_siftID1, sizeof(int)*target.num_keys); cudaMalloc((void**)&d_target_siftID2, sizeof(int)*target.num_keys); cudaMalloc((void**)&d_target_siftID3, sizeof(int)*target.num_keys); cudaMalloc((void**)&d_target_siftID4, sizeof(int)*target.num_keys); cudaMalloc((void**)&d_fmatrix, sizeof(float)*9); cudaMalloc((void**)&d_gridID, sizeof(int)*target.num_keys); cudaMalloc((void**)&d_gridID1, sizeof(int)*target.num_keys); cudaMalloc((void**)&d_gridID2, sizeof(int)*target.num_keys); cudaMalloc((void**)&d_gridID3, sizeof(int)*target.num_keys); cudaMalloc((void**)&d_gridID4, sizeof(int)*target.num_keys); cudaMalloc((void**)&d_epipolarPoints, sizeof(long long int)*source.num_keys); cudaMalloc((void**)&d_cluster, sizeof(int)*source.num_keys); cudaMalloc((void**)&d_clusterID, sizeof(int)*source.num_keys); //============================================================== clock_gettime(CLOCK_MONOTONIC, &t2); double time_malloc1 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //================ Copy arrays from host to device ============= // cudaMemcpy(desc1, descriptors1.data(), sizeof(float)*source.num_keys*128, cudaMemcpyHostToDevice); // cudaMemcpy(desc2, descriptors2.data(), sizeof(float)*target.num_keys*128, cudaMemcpyHostToDevice); clock_gettime(CLOCK_MONOTONIC, &t1); // cudaMemcpy(d_keys1, keys1.data(), sizeof(SiftGPU::SiftKeypoint)*source.num_keys, cudaMemcpyHostToDevice); // cudaMemcpy(d_keys2, keys2.data(), sizeof(SiftGPU::SiftKeypoint)*target.num_keys, cudaMemcpyHostToDevice); cudaMemcpy(d_fmatrix, f_matrix, sizeof(float)*9, cudaMemcpyHostToDevice); // cudaMemcpy(d_target_keypoints, target.keypoints, sizeof(unsigned char)*target.num_keys*128, cudaMemcpyHostToDevice); // cudaMemcpy(d_source_keypoints, source.keypoints, sizeof(unsigned char)*source.num_keys*128, cudaMemcpyHostToDevice); //============================================================== int threadsPerBlock; int numBlocks; thrust::device_ptr<int> d_target_sift_ptr(d_target_siftID); thrust::device_ptr<int> d_source_sift_ptr(d_source_siftID); thrust::sequence(d_target_sift_ptr,d_target_sift_ptr+target.num_keys); thrust::sequence(d_source_sift_ptr,d_source_sift_ptr+source.num_keys); // initialize a device_vector with the list //// 
thrust::host_vector<unsigned char> h_d1(descriptors1.begin(), descriptors1.end()); //// thrust::host_vector<unsigned char> h_d2(descriptors2.begin(), descriptors2.end()); //// thrust::device_vector<unsigned char> d1 = h_d1; //// thrust::device_vector<unsigned char> d2 = h_d2; // thrust::device_ptr<unsigned char> d_desc1_ptr(desc1); // thrust::device_ptr<unsigned char> d_desc2_ptr(desc2); // thrust::device_vector<unsigned char> u_d1(128*source.num_keys); // thrust::device_vector<unsigned char> u_d2(128*target.num_keys); // thrust::transform(d1.begin(), d1.end(), u_d1.begin(), cast()); // thrust::transform(d2.begin(), d2.end(), u_d2.begin(), cast()); // thrust::transform(d_desc1_ptr,d_desc1_ptr + 128*source.num_keys, u_d1.begin(), cast()); // thrust::transform(d_desc2_ptr,d_desc2_ptr + 128*target.num_keys, u_d2.begin(), cast()); //// unsigned char * desc1_ptr = thrust::raw_pointer_cast(d1.data()); //// unsigned char * desc2_ptr = thrust::raw_pointer_cast(d2.data()); // unsigned char *h = (unsigned char *)malloc(128*target.num_keys*sizeof(unsigned char)); // unsigned char * dv_ptr = thrust::raw_pointer_cast(u_d2.data()); // cudaMemcpy(h,dv_ptr,128*sizeof(unsigned char)*target.num_keys, cudaMemcpyDeviceToHost); // for(int i = 0; i < 128*target.num_keys; i++) // { // printf("%hhu\n",h[i]); // } cudaMemcpy(d_target_siftID1, d_target_siftID, sizeof(int)*target.num_keys, cudaMemcpyDeviceToDevice); cudaMemcpy(d_target_siftID2, d_target_siftID, sizeof(int)*target.num_keys, cudaMemcpyDeviceToDevice); cudaMemcpy(d_target_siftID3, d_target_siftID, sizeof(int)*target.num_keys, cudaMemcpyDeviceToDevice); cudaMemcpy(d_target_siftID4, d_target_siftID, sizeof(int)*target.num_keys, cudaMemcpyDeviceToDevice); //====================== Bind textures ========================= cudaBindTexture (0, tex_targetDescriptor, descriptors2, sizeof(unsigned char)*target.num_keys*128 ); cudaBindTexture (0, tex_sourceDescriptor, descriptors1, sizeof(unsigned char)*source.num_keys*128 ); // cudaBindTexture (0, tex_targetDescriptor, d_target_keypoints, sizeof(unsigned char)*target.num_keys*128 ); // cudaBindTexture (0, tex_sourceDescriptor, d_source_keypoints, sizeof(unsigned char)*source.num_keys*128 ); clock_gettime(CLOCK_MONOTONIC, &t2); double time_memcpy1 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //printf("%d %d\n",source.num_keys,target.num_keys); //====================== INDEXING STEP ======================== // The indexing step divides the target image into grids. // The features get indexed based on the grid to which they belong. 
//============================================================== Grid target_grid; target_grid.g_init(target); //================ Decide kernel configuration ================= threadsPerBlock = 512; numBlocks = ceil(((float)target.num_keys)/threadsPerBlock); //===================== Call the GPU Kernel ==================== // This kernel computes gridID for each sift of the target image clock_gettime(CLOCK_MONOTONIC, &t1); computeGridID<<<numBlocks, threadsPerBlock>>>(d_keys2, d_gridID1, d_gridID2, d_gridID3, d_gridID4, target.num_keys,target_grid.gridSize, target_grid.numXGrids1,target_grid.halfSize,target_grid.numXGrids2,target_grid.numGrids,target_grid.numGridsXOv,target_grid.numGridsYOv); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_computeGridID = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== // Current situation // +-------+---+---+---+---+---+---+---+---- // |SiftID | 0 | 1 | 2 | 3 | 4 | 5 | 6 | ... // +-------+---+---+---+---+---+---+---+---- // |GridID | a | b | c | d | e | f | g | ... // +-------+---+---+---+---+---+---+---+---- // Desired situation // +-------+---+---+---+---+---+---+---+---- // |GridID | 0 | 0 | 2 | 5 | 5 | 5 | 6 | ... // +-------+---+---+---+---+---+---+---+---- // |SiftID | a | b | c | d | e | f | g | ... // +-------+---+---+---+---+---+---+---+---- // Sort to bring sifts with same gridID together //========= Sort : keys = gridID , values = siftID =========== clock_gettime(CLOCK_MONOTONIC, &t1); thrust::device_ptr<int> d_gridID_ptr1(d_gridID1); thrust::device_ptr<int> d_target_siftID_ptr1(d_target_siftID1); thrust::device_ptr<int> d_gridID_ptr2(d_gridID2); thrust::device_ptr<int> d_target_siftID_ptr2(d_target_siftID2); thrust::device_ptr<int> d_gridID_ptr3(d_gridID3); thrust::device_ptr<int> d_target_siftID_ptr3(d_target_siftID3); thrust::device_ptr<int> d_gridID_ptr4(d_gridID4); thrust::device_ptr<int> d_target_siftID_ptr4(d_target_siftID4); thrust::sort_by_key(d_gridID_ptr1, d_gridID_ptr1+target.num_keys,d_target_siftID_ptr1); thrust::sort_by_key(d_gridID_ptr2, d_gridID_ptr2+target.num_keys,d_target_siftID_ptr2); thrust::sort_by_key(d_gridID_ptr3, d_gridID_ptr3+target.num_keys,d_target_siftID_ptr3); thrust::sort_by_key(d_gridID_ptr4, d_gridID_ptr4+target.num_keys,d_target_siftID_ptr4); clock_gettime(CLOCK_MONOTONIC, &t2); double time_thrust1 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); // Current situation // +-------+---+---+---+---+---+---+---+---- // |GridID | 0 | 0 | 2 | 5 | 5 | 5 | 6 | ... // +-------+---+---+---+---+---+---+---+---- // |SiftID | a | b | c | d | e | f | g | ... // +-------+---+---+---+---+---+---+---+---- // Now to access all sifts corresponding to gridID = x we record topOfBucket // +---------------+---+---+---+---+---+---+---+--- // |GridID | 0 | 1 | 2 | 3 | 4 | 5 | 6 | ... // +---------------+---+---+---+---+---+---+---+--- // |TopOfBucket | 0 |-1 | 2 |-1 |-1 | 3 | 6 | ... 
// +---------------+---+---+---+---+---+---+---+--- // filhal cudaMemcpy(d_gridID, d_gridID1, sizeof(int)*target.num_keys, cudaMemcpyDeviceToDevice); cudaMemcpy(d_target_siftID, d_target_siftID1, sizeof(int)*target.num_keys, cudaMemcpyDeviceToDevice); //================== Record topOfBucket ===================== clock_gettime(CLOCK_MONOTONIC, &t1); thrust::device_vector<int> d_topOfBucket(target_grid.numGrids, -1); thrust::device_vector<int> d_numSift(target_grid.numGrids, 0); thrust::device_vector<int> d_topOfBucket2(target_grid.numGridsXOv, -1); thrust::device_vector<int> d_numSift2(target_grid.numGridsXOv, 0); thrust::device_vector<int> d_topOfBucket3(target_grid.numGridsYOv, -1); thrust::device_vector<int> d_numSift3(target_grid.numGridsYOv, 0); thrust::device_vector<int> d_topOfBucket4(target_grid.numGridsXYOv, -1); thrust::device_vector<int> d_numSift4(target_grid.numGridsXYOv, 0); int *d_topOfBucket_ptr = thrust::raw_pointer_cast(d_topOfBucket.data()); int *d_numSift_ptr = thrust::raw_pointer_cast(d_numSift.data()); int *d_topOfBucket2_ptr = thrust::raw_pointer_cast(d_topOfBucket2.data()); int *d_numSift2_ptr = thrust::raw_pointer_cast(d_numSift2.data()); int *d_topOfBucket3_ptr = thrust::raw_pointer_cast(d_topOfBucket3.data()); int *d_numSift3_ptr = thrust::raw_pointer_cast(d_numSift3.data()); int *d_topOfBucket4_ptr = thrust::raw_pointer_cast(d_topOfBucket4.data()); int *d_numSift4_ptr = thrust::raw_pointer_cast(d_numSift4.data()); clock_gettime(CLOCK_MONOTONIC, &t2); double time_thrust2 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //================== Call the GPU kernel ==================== clock_gettime(CLOCK_MONOTONIC, &t1); findTopOfBucket<<<numBlocks, threadsPerBlock>>>(d_gridID,d_gridID2,d_gridID3,d_gridID4, target.num_keys,d_topOfBucket_ptr,d_topOfBucket2_ptr,d_topOfBucket3_ptr,d_topOfBucket4_ptr,target_grid.numGrids,target_grid.numGridsXOv,target_grid.numGridsYOv,target_grid.numGridsXYOv); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_topOfBucket = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //========== Record the number of sifts in each grid =========== threadsPerBlock = 512; numBlocks = ceil((float)(max(target_grid.numGrids,target_grid.numGridsXOv,target_grid.numGridsYOv,target_grid.numGridsXYOv))/threadsPerBlock); clock_gettime(CLOCK_MONOTONIC, &t1); findNumSift<<<numBlocks, threadsPerBlock>>>(d_gridID,d_topOfBucket_ptr,d_gridID2,d_topOfBucket2_ptr,d_gridID3,d_topOfBucket3_ptr,d_gridID4,d_topOfBucket4_ptr,target_grid.numGrids,target_grid.numGridsXOv,target_grid.numGridsYOv,target_grid.numGridsXYOv,d_numSift_ptr,d_numSift2_ptr,d_numSift3_ptr,d_numSift4_ptr); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_numSift= ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //==================== CLUSTERING STEP ======================== // This step deals with finding the epipolar lines for each sift // of the source image and clustering sifts with similar epipolar // lines together. //============================================================== //=================== Find epipolar lines ====================== // The kernel findEpipolarLine finds the epipolar line and stores // stores its points of intersection with the boundaries of the // target image in d_epipolarPoints. 
//============================================================== //=============== Decide kernel configuration ================== threadsPerBlock = 512; numBlocks = ceil(((float)source.num_keys)/threadsPerBlock); clock_gettime(CLOCK_MONOTONIC, &t1); findEpipolarLine<<<numBlocks, threadsPerBlock>>>(d_fmatrix,d_keys1,source.width,source.height, target.width, target.height,d_epipolarPoints,source.num_keys); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_findEpiline = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //==== Sort the epipolar lines (keys) with siftIDs as values ==== clock_gettime(CLOCK_MONOTONIC, &t1); thrust::device_ptr<long long int> d_epipolarPoints_ptr(d_epipolarPoints); thrust::device_ptr<int> d_source_siftID_ptr(d_source_siftID); thrust::sort_by_key(d_epipolarPoints_ptr, d_epipolarPoints_ptr+source.num_keys,d_source_siftID_ptr); clock_gettime(CLOCK_MONOTONIC, &t2); double time_thrust4 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== // Current scene : // sifts with similar epipolar lines are together now // +----------------------+---+---+---+---+---+---+---+---- // |Epipolar Line end pts | a | b | c | d | e | f | g | ... (sorted) // +----------------------+---+---+---+---+---+---+---+---- // |SiftID | m | n | o | p | q | r | s | ... (random order) // +----------------------+---+---+---+---+---+---+---+---- // An array called cluster(h_cluster and d_cluster) will set the boundaries // If epl[i] - epl[i-1] < threshold then cluster[i] = 0 else cluster[i] = 1 //============================================================== h_cluster[0] = 0; clock_gettime(CLOCK_MONOTONIC, &t1); cudaMemcpy(d_cluster, h_cluster, sizeof(int)*source.num_keys, cudaMemcpyHostToDevice); clock_gettime(CLOCK_MONOTONIC, &t2); double time_memcpy3 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //=== Cluster the epipolarPoints together (similar epipolar lines in one group) === clock_gettime(CLOCK_MONOTONIC, &t1); clusterLines<<<numBlocks, threadsPerBlock>>>(d_epipolarPoints,d_cluster,source.num_keys); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_clusterLines = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); // Scene after clustering : // sifts with similar epipolar lines are together now // +----------------------+---+---+---+---+---+---+---+---- // |Epipolar Line end pts | a | b | c | d | e | f | g | ... (sorted) // +----------------------+---+---+---+---+---+---+---+---- // |SiftID | m | n | o | p | q | r | s | ... (random order) // +----------------------+---+---+---+---+---+---+---+---- // |cluster | 0 | 0 | 1 | 0 | 1 | 1 | 1 | ... // +----------------------+---+---+---+---+---+---+---+---- // Now compute prefix sum to assign cluster ids //============================================================== clock_gettime(CLOCK_MONOTONIC, &t1); thrust::device_ptr<int> d_cluster_ptr(d_cluster); thrust::inclusive_scan(d_cluster_ptr, d_cluster_ptr + source.num_keys, d_cluster_ptr); clock_gettime(CLOCK_MONOTONIC, &t2); double time_thrust5 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //================= current scene example ====================== // // +---------------+---+---+---+---+---+---+---+---- // |id | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 ... 
// +---------------+---+---+---+---+---+---+---+---- // |siftID | a | b | c | d | e | f | g | h ... // +---------------+---+---+---+---+---+---+---+---- // |h_cluster | 0 | 1 | 1 | 2 | 3 | 3 | 3 | 4 ... // +---------------+---+---+---+---+---+---+---+---- // // so we want cluster location to be like this // // +-----------------+---+---+---+---+---+---+---+---- // |cluster_id | 0 | 1 | 2 | 3 | 4 | ... // +-----------------+---+---+---+---+---+---+---+---- // |cluster_location | 0 | 1 | 3 | 4 | 7 | ... // +-----------------+---+---+---+---+---+---+---+---- // //============================================================== clock_gettime(CLOCK_MONOTONIC, &t1); cudaMemcpy(h_cluster, d_cluster, sizeof(int)*source.num_keys, cudaMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC, &t2); double time_memcpy4 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== // ================= Record Number of Clusters ================ int numClusters = h_cluster[source.num_keys-1]+1; //============================================================== clock_gettime(CLOCK_MONOTONIC, &t1); thrust::device_vector<int> d_clusterLocation(numClusters, -1); d_clusterLocation[h_cluster[0]] = 0; int * d_clusterLocation_ptr = thrust::raw_pointer_cast(d_clusterLocation.data()); clock_gettime(CLOCK_MONOTONIC, &t2); double time_thrust6 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //==================== Find Cluster Location =================== clock_gettime(CLOCK_MONOTONIC, &t1); findClusterLocation<<<numBlocks, threadsPerBlock>>>(d_cluster, source.num_keys,d_clusterLocation_ptr,d_source_siftID,d_clusterID); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_findClusterLocation = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //===================== Matching Step ========================== // // Matching involves the following two kernels : // ----------------------------------------------------------- // // -> FindPotentialMatches which finds eqidistant points on the // epipolar line and the grids those points fall into. Then it // stores the target sifts which are a part of those grids. // These sifts are our potential matches. This is done for each // cluster. // // ----------------------------------------------------------- // // -> FindMatches which, for each source sift, loads the potential // matches and computes the euclidian distances. It then takes // the ratio of minimum and next minimum distance and declares // a match based on some criterias. 
// //============================================================== // [date : 1 oct 2015] first we do a find potential matches to just find the num of potential matches for each cluster // then we find the max of these num_potential_matches // and then we malloc the d_pot_patches array with that size //================= Decide kernel configuration ================ threadsPerBlock = 256 ; numBlocks = numClusters; //============================================================== thrust::device_vector<int> d_numPotentialMatches(numClusters, 0); int * d_numPotentialMatches_ptr = thrust::raw_pointer_cast(d_numPotentialMatches.data()); clock_gettime(CLOCK_MONOTONIC, &t1); findNumPotentialMatchesForEachCluster<<<numBlocks, threadsPerBlock, 2*threadsPerBlock*sizeof(int)>>>(d_gridID,d_target_siftID,d_topOfBucket_ptr,d_gridID2,d_target_siftID2,d_topOfBucket2_ptr,d_gridID3,d_target_siftID3,d_topOfBucket3_ptr,d_gridID4,d_target_siftID4,d_topOfBucket4_ptr,target_grid.gridSize,d_clusterLocation_ptr,d_epipolarPoints,d_numSift_ptr,d_numSift2_ptr,d_numSift3_ptr,d_numSift4_ptr,target.height, target_grid.numXGrids1,target_grid.halfSize,target_grid.numXGrids2,target_grid.numGridsXOv,target_grid.numGridsYOv,target_grid.numGrids,target_grid.numGridsXYOv,d_numPotentialMatches_ptr); cudaThreadSynchronize(); // for(int i = 0 ; i < d_numPotentialMatches.size(); i++) // std::cout << d_numPotentialMatches[i] << std::endl; thrust::device_vector<int>::iterator d_max = thrust::max_element(d_numPotentialMatches.begin(),d_numPotentialMatches.end()); // int *result = thrust::min_element(d_numPotentialMatches.begin(), d_numPotentialMatches.end()); int maxNumPotentialMatches = *d_max; printf("maxpot = %d\n",maxNumPotentialMatches); clock_gettime(CLOCK_MONOTONIC, &t2); double time_computeNumPotentialMatches = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //================ Malloc d_potentialMatches =================== clock_gettime(CLOCK_MONOTONIC, &t1); int *d_potentialMatches; cudaMalloc((void **)&d_potentialMatches,numClusters*(maxNumPotentialMatches+2)*sizeof(int)); // allocate memory on device clock_gettime(CLOCK_MONOTONIC, &t2); double time_malloc4 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //=========== Find Potential Matches for each cluster ========== clock_gettime(CLOCK_MONOTONIC, &t1); findPotentialMatchesForEachCluster<<<numBlocks, threadsPerBlock, 2*threadsPerBlock*sizeof(int)>>>(d_gridID,d_target_siftID,d_topOfBucket_ptr,d_gridID2,d_target_siftID2,d_topOfBucket2_ptr,d_gridID3,d_target_siftID3,d_topOfBucket3_ptr,d_gridID4,d_target_siftID4,d_topOfBucket4_ptr,target_grid.gridSize,d_clusterLocation_ptr,d_epipolarPoints,d_numSift_ptr,d_numSift2_ptr,d_numSift3_ptr,d_numSift4_ptr,d_potentialMatches,target.height, target_grid.numXGrids1,target_grid.halfSize,target_grid.numXGrids2,target_grid.numGridsXOv,target_grid.numGridsYOv,target_grid.numGrids,target_grid.numGridsXYOv,maxNumPotentialMatches); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_computePotentialMatches = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== // Declare matches array which stores the matching siftID // ( or -1 if no match found) for each query sift clock_gettime(CLOCK_MONOTONIC, &t1); int *match = (int *)malloc(sizeof(int)*source.num_keys); int *d_matches_ptr; 
cudaMalloc((void**)&d_matches_ptr, sizeof(int)*source.num_keys); for(i = 0 ; i < source.num_keys; i++) { match[i] = -1; } cudaMemcpy(d_matches_ptr, match, sizeof(int)*source.num_keys, cudaMemcpyHostToDevice); int *h_potentialMatches = (int *)malloc(numClusters*(2+maxNumPotentialMatches)*sizeof(int)); // allocate memory on device cudaMemcpy(h_potentialMatches , d_potentialMatches,numClusters*(maxNumPotentialMatches+2)*sizeof(int) , cudaMemcpyDeviceToHost); /* for(int i = 0 ; i < numClusters; i++) { if(h_potentialMatches[maxNumPotentialMatches*i] != 0) printf("numpotm = %d\n",h_potentialMatches[maxNumPotentialMatches*i]); } */ // thrust::device_vector<int> d_matches(source.num_keys, -1); // int * d_matches_ptr = thrust::raw_pointer_cast(d_matches.data()); clock_gettime(CLOCK_MONOTONIC, &t2); double time_thrust3 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //================= Decide kernel configuration ================ threadsPerBlock = 256 ; numBlocks = source.num_keys; //============================================================== //======================== Find Matches ======================== clock_gettime(CLOCK_MONOTONIC, &t1); findMatches<<<numBlocks, threadsPerBlock,4*threadsPerBlock*sizeof(int)>>>(d_matches_ptr,d_clusterID,d_potentialMatches,maxNumPotentialMatches); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &t2); double time_computeMatches = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== cudaMemcpy(match, d_matches_ptr, sizeof(int)*source.num_keys, cudaMemcpyDeviceToHost); //====================== memcpy ============================== clock_gettime(CLOCK_MONOTONIC, &t1); // thrust::host_vector<int> h_matches = d_matches; clock_gettime(CLOCK_MONOTONIC, &t2); double time_memcpy5 = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //======================== CLEAN UP ============================ //===================== bind the textures ==================== clock_gettime(CLOCK_MONOTONIC, &t1); cudaUnbindTexture (tex_targetDescriptor); cudaUnbindTexture (tex_sourceDescriptor); clock_gettime(CLOCK_MONOTONIC, &t2); double time_unbindtex = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); //============================================================== //===================== Free device memory ===================== clock_gettime(CLOCK_MONOTONIC, &t1); // cudaFree(d_keys1); // cudaFree(d_keys2); // cudaFree(d_source_x); // cudaFree(d_source_y); // cudaFree(d_target_x); // cudaFree(d_target_y); //// cudaFree(d_source_keypoints); //// cudaFree(d_target_keypoints); cudaFree(d_source_siftID); cudaFree(d_target_siftID); cudaFree(d_target_siftID1); cudaFree(d_target_siftID2); cudaFree(d_target_siftID3); cudaFree(d_target_siftID4); cudaFree(d_fmatrix); cudaFree(d_gridID); cudaFree(d_gridID1); cudaFree(d_gridID2); cudaFree(d_gridID3); cudaFree(d_gridID4); cudaFree(d_epipolarPoints); cudaFree(d_cluster); cudaFree(d_clusterID); cudaFree(d_potentialMatches); cudaFree(d_matches_ptr); //======================== Free host memory ==================== /* free(h_target_siftID); free(h_gridID); free(target_x); free(target_y); free(target_scale); free(target_orient); free(target_keypoints); free(h_source_siftID); free(source_x); free(source_y); free(source_scale); free(source_orient); free(source_keypoints); */ free(h_cluster); 
free(h_epipolarPoints); free(h_gridID); free(h_potentialMatches); clock_gettime(CLOCK_MONOTONIC, &t2); double time_freemem = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); clock_gettime(CLOCK_MONOTONIC, &t4); double time_total = ((t4.tv_sec - t3.tv_sec)*1000) + (((double)(t4.tv_nsec - t3.tv_nsec))/1000000.0); double time_gpu = ((t4.tv_sec - t5.tv_sec)*1000) + (((double)(t4.tv_nsec - t5.tv_nsec))/1000000.0); //============================================================== int num_matches = 0; for(i = 0 ; i < source.num_keys; i++) { if(match[i]!=-1) { num_matches++; matches.push_back(std::make_pair(i,match[i])); } } free(match); //============================================================== /* std::cout << "Time on GPU init: " << time_gpu_init << std::endl; std::cout << "Time on CPU setup: " << time_total_cpu_alloc << std::endl; std::cout << "Time - memcpy : " << time_memcpy1+time_memcpy3+time_memcpy4+time_memcpy5 << std::endl; printf("time_memcpy1 %lf, time_memcpy3 %lf , time_memcpy4 %lf, time_memcpy5 %lf\n",time_memcpy1,time_memcpy3,time_memcpy4,time_memcpy5); std::cout << "Time - malloc : " << time_malloc1+time_malloc4 << std::endl; std::cout << "Time to compute gridID : " << time_computeGridID << std::endl; std::cout << "Time - thrust : " << time_thrust1+time_thrust2+time_thrust3+time_thrust4+time_thrust5+time_thrust6 << std::endl; std::cout << "Time to compute topOfBucket : " << time_topOfBucket << std::endl; std::cout << "Time to find num sifts per grid : " << time_numSift << std::endl; std::cout << "Time to find epipolar lines : " << time_findEpiline << std::endl; std::cout << "Time to cluster lines : " << time_clusterLines << std::endl; std::cout << "Time to find cluster location : " << time_findClusterLocation << std::endl; std::cout << "Time to compute num potential matches per cluster: " << time_computeNumPotentialMatches << std::endl; std::cout << "Time to compute potential matches per cluster: " << time_computePotentialMatches << std::endl; std::cout << "Time to compute matches: " << time_computeMatches << std::endl; std::cout << "Time to unbind textures : " << time_unbindtex << std::endl; std::cout << "Time to free memory : " << time_freemem << std::endl; std::cout << "total time on gpu: " << time_gpu << std::endl; std::cout << "total time : " << time_total << std::endl; */ std::cout << "Total matches found : " << num_matches << std::endl; std::cout << time_gpu << std::endl; return time_gpu ; //============================================================== }
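// --------------------------------------------------------------------------
// A minimal, self-contained host-side sketch (not part of the original source)
// of the endpoint-packing scheme the kernels above rely on: the two endpoints
// of an epipolar segment are stored in one 64-bit key so that sorting by this
// key groups similar epipolar lines, and the kernels recover the four shorts
// with the shift-and-mask logic seen in findNumPotentialMatchesForEachCluster
// and findPotentialMatchesForEachCluster. The packing layout is inferred from
// that unpacking code; the helper names here are hypothetical.
// --------------------------------------------------------------------------
#include <cstdio>

static inline long long packEndpoints(short x1, short y1, short x2, short y2)
{
    // Layout from most- to least-significant 16 bits: x1 | y1 | x2 | y2.
    return ((long long)(unsigned short)x1 << 48) |
           ((long long)(unsigned short)y1 << 32) |
           ((long long)(unsigned short)x2 << 16) |
            (long long)(unsigned short)y2;
}

static inline void unpackEndpoints(long long l, short &x1, short &y1, short &x2, short &y2)
{
    unsigned long long s = 0xFFFFULL;  // same mask the kernels build via (~0ULL) >> 48
    x1 = (short)(s & ((unsigned long long)l >> 48));
    y1 = (short)(s & ((unsigned long long)l >> 32));
    x2 = (short)(s & ((unsigned long long)l >> 16));
    y2 = (short)(s &  (unsigned long long)l);
}

int main()
{
    short x1, y1, x2, y2;
    unpackEndpoints(packEndpoints(10, 20, 300, 400), x1, y1, x2, y2);
    printf("%d %d %d %d\n", x1, y1, x2, y2);  // expected: 10 20 300 400
    return 0;
}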
f2cb6466613cd9af469300b12c9cadaa08a4e111.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "my_cuda.cu" #include "common/cpu_anim.h" constexpr int N = 1024 * 33; constexpr int DIM = 1024; struct DataBlock { byte *device_bitmap; CPUAnimBitmap *bitmap; }; void cleanup(DataBlock *db) { hipFree(db->device_bitmap); } __global__ void add(int *a, int *b, int *c) { //int threadId = threadIdx.x; //int threadId = blockIdx.x; int threadId = (blockIdx.x * blockDim.x) + threadIdx.x; while (threadId < N) { c[threadId] = a[threadId] + b[threadId]; threadId += blockDim.x * gridDim.x; } } __global__ void kernel(byte *ptr, int ticks) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int offset = (blockDim.x * gridDim.x * y) + x; float fx = x - DIM / 2; float fy = y - DIM / 2; float d = sqrtf(fx * fx + fy * fy); byte gray = (byte)(128.0f + 127.0f * cos(d / 10.0f - ticks / 7.0f) / (d / 10.0f + 1.0f)); //float grayF = (128.0f + 127.0f * cos(d / 10.0f - ticks / 7.0f) / (d / 10.0f + 1.0f)); ptr[offset * 4 + 0] = 0; ptr[offset * 4 + 1] = 0; ptr[offset * 4 + 2] = gray; ptr[offset * 4 + 3] = 255; } void generate_frame(DataBlock *db, int ticks) { dim3 blocks(DIM / 16, DIM / 16); dim3 threads(16, 16); hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads), 0, 0, db->device_bitmap, ticks); hipMemcpy(db->bitmap->get_ptr(), db->device_bitmap, db->bitmap->image_size(), hipMemcpyDeviceToHost); } int main(int, char **) { /* int a[N]; int b[N]; int *device_a, *device_b, *device_c; HANDLE_ERROR(hipMalloc((void **)&device_a, N * sizeof(int))); HANDLE_ERROR(hipMalloc((void **)&device_b, N * sizeof(int))); HANDLE_ERROR(hipMalloc((void **)&device_c, N * sizeof(int))); for (size_t i = 0; i < N; i++) { a[i] = i * i; b[i] = i + i; } HANDLE_ERROR(hipMemcpy(device_a, a, N * sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(device_b, b, N * sizeof(int), hipMemcpyHostToDevice)); // Calling N blocks. //add<<<N, 1>>>(device_a, device_b, device_c); // Calling N threads. add<<<128, 128>>>(device_a, device_b, device_c); int c[N]; HANDLE_ERROR(hipMemcpy(c, device_c, N * sizeof(int), hipMemcpyDeviceToHost)); for (size_t i = N - 100; i < N; i++) { printf("%i + %i = %i\n", a[i], b[i], c[i]); } hipFree(device_a); hipFree(device_b); hipFree(device_c); */ DataBlock data; CPUAnimBitmap bitmap(DIM, DIM, &data); data.bitmap = &bitmap; hipMalloc((void **)&data.device_bitmap, bitmap.image_size()); bitmap.anim_and_exit((void (*)(void *, int))generate_frame, (void (*)(void *))cleanup); return 0; }
f2cb6466613cd9af469300b12c9cadaa08a4e111.cu
#include "my_cuda.cu" #include "common/cpu_anim.h" constexpr int N = 1024 * 33; constexpr int DIM = 1024; struct DataBlock { byte *device_bitmap; CPUAnimBitmap *bitmap; }; void cleanup(DataBlock *db) { cudaFree(db->device_bitmap); } __global__ void add(int *a, int *b, int *c) { //int threadId = threadIdx.x; //int threadId = blockIdx.x; int threadId = (blockIdx.x * blockDim.x) + threadIdx.x; while (threadId < N) { c[threadId] = a[threadId] + b[threadId]; threadId += blockDim.x * gridDim.x; } } __global__ void kernel(byte *ptr, int ticks) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int offset = (blockDim.x * gridDim.x * y) + x; float fx = x - DIM / 2; float fy = y - DIM / 2; float d = sqrtf(fx * fx + fy * fy); byte gray = (byte)(128.0f + 127.0f * cos(d / 10.0f - ticks / 7.0f) / (d / 10.0f + 1.0f)); //float grayF = (128.0f + 127.0f * cos(d / 10.0f - ticks / 7.0f) / (d / 10.0f + 1.0f)); ptr[offset * 4 + 0] = 0; ptr[offset * 4 + 1] = 0; ptr[offset * 4 + 2] = gray; ptr[offset * 4 + 3] = 255; } void generate_frame(DataBlock *db, int ticks) { dim3 blocks(DIM / 16, DIM / 16); dim3 threads(16, 16); kernel<<<blocks, threads>>>(db->device_bitmap, ticks); cudaMemcpy(db->bitmap->get_ptr(), db->device_bitmap, db->bitmap->image_size(), cudaMemcpyDeviceToHost); } int main(int, char **) { /* int a[N]; int b[N]; int *device_a, *device_b, *device_c; HANDLE_ERROR(cudaMalloc((void **)&device_a, N * sizeof(int))); HANDLE_ERROR(cudaMalloc((void **)&device_b, N * sizeof(int))); HANDLE_ERROR(cudaMalloc((void **)&device_c, N * sizeof(int))); for (size_t i = 0; i < N; i++) { a[i] = i * i; b[i] = i + i; } HANDLE_ERROR(cudaMemcpy(device_a, a, N * sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(device_b, b, N * sizeof(int), cudaMemcpyHostToDevice)); // Calling N blocks. //add<<<N, 1>>>(device_a, device_b, device_c); // Calling N threads. add<<<128, 128>>>(device_a, device_b, device_c); int c[N]; HANDLE_ERROR(cudaMemcpy(c, device_c, N * sizeof(int), cudaMemcpyDeviceToHost)); for (size_t i = N - 100; i < N; i++) { printf("%i + %i = %i\n", a[i], b[i], c[i]); } cudaFree(device_a); cudaFree(device_b); cudaFree(device_c); */ DataBlock data; CPUAnimBitmap bitmap(DIM, DIM, &data); data.bitmap = &bitmap; cudaMalloc((void **)&data.device_bitmap, bitmap.image_size()); bitmap.anim_and_exit((void (*)(void *, int))generate_frame, (void (*)(void *))cleanup); return 0; }
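// --------------------------------------------------------------------------
// A minimal sketch (not part of the original source) of the grid-stride loop
// pattern used by the add() kernel above: the same kernel covers all N
// elements whether it is launched with one thread per element or with a fixed
// 128x128 configuration, because each thread strides by the total grid size.
// Array names and sizes here are illustrative only.
// --------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

__global__ void addGridStride(const int *a, const int *b, int *c, int n)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;  // global thread index
         i < n;
         i += blockDim.x * gridDim.x)                    // stride = total threads in the grid
        c[i] = a[i] + b[i];
}

int main()
{
    const int n = 1 << 20;
    int *a, *b, *c;
    cudaMallocManaged(&a, n * sizeof(int));
    cudaMallocManaged(&b, n * sizeof(int));
    cudaMallocManaged(&c, n * sizeof(int));
    for (int i = 0; i < n; ++i) { a[i] = i; b[i] = 2 * i; }

    addGridStride<<<128, 128>>>(a, b, c, n);  // far fewer threads than elements
    cudaDeviceSynchronize();

    printf("c[12345] = %d (expected %d)\n", c[12345], 3 * 12345);
    cudaFree(a); cudaFree(b); cudaFree(c);
    return 0;
}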
ae6ce2af7ed88170053c15489d9a4aef8e9c2e22.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void transpose(float *odata, float *idata, int width, int height) { __shared__ float tile[BLOCK_DIM][BLOCK_DIM + 1]; int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x; int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y; if (xIndex<width && yIndex<height) { int index_in = xIndex + (yIndex)*width; tile[threadIdx.y][threadIdx.x] = idata[index_in]; } __syncthreads(); int xIndex_new = blockIdx.y * BLOCK_DIM + threadIdx.x; int yIndex_new = blockIdx.x * BLOCK_DIM + threadIdx.y; if (xIndex_new<height && yIndex_new<width) { int index_out = xIndex_new + (yIndex_new)*height; odata[index_out] = tile[threadIdx.x][threadIdx.y]; } }
ae6ce2af7ed88170053c15489d9a4aef8e9c2e22.cu
__global__ void transpose(float *odata, float *idata, int width, int height) { __shared__ float tile[BLOCK_DIM][BLOCK_DIM + 1]; int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x; int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y; if (xIndex<width && yIndex<height) { int index_in = xIndex + (yIndex)*width; tile[threadIdx.y][threadIdx.x] = idata[index_in]; } __syncthreads(); int xIndex_new = blockIdx.y * BLOCK_DIM + threadIdx.x; int yIndex_new = blockIdx.x * BLOCK_DIM + threadIdx.y; if (xIndex_new<height && yIndex_new<width) { int index_out = xIndex_new + (yIndex_new)*height; odata[index_out] = tile[threadIdx.x][threadIdx.y]; } }
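Note on the transpose pair above: the kernel relies on a BLOCK_DIM macro that is never defined in the file itself. A minimal host-side launcher is sketched below under the assumptions that BLOCK_DIM is 16, that the kernel definition above is compiled in the same translation unit (the forward declaration stands in for it here), and that the width/height values are purely illustrative.

// Sketch only: hypothetical launcher for the transpose kernel above, assuming BLOCK_DIM = 16.
#define BLOCK_DIM 16
#include <cuda_runtime.h>

__global__ void transpose(float *odata, float *idata, int width, int height); // defined above

int main() {
    const int width = 256, height = 128;                 // illustrative sizes
    const size_t bytes = width * height * sizeof(float);
    float *d_in, *d_out;
    cudaMalloc((void **)&d_in, bytes);
    cudaMalloc((void **)&d_out, bytes);

    // One BLOCK_DIM x BLOCK_DIM thread block per tile, rounded up to cover the matrix.
    dim3 block(BLOCK_DIM, BLOCK_DIM);
    dim3 grid((width + BLOCK_DIM - 1) / BLOCK_DIM, (height + BLOCK_DIM - 1) / BLOCK_DIM);
    transpose<<<grid, block>>>(d_out, d_in, width, height);
    cudaDeviceSynchronize();

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}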
a0cbaf47e5097858811aa54e78bda5d8932ec345.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // This sample is an implementation of a simple line-of-sight algorithm: // Given a height map and a ray originating at some observation point, // it computes all the points along the ray that are visible from the // observation point. // It is based on the description made in "Guy E. Blelloch. Vector models // for data-parallel computing. MIT Press, 1990" and uses open source CUDA // Thrust Library #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> // includes, project #include <cutil_inline.h> #include <cutil_math.h> #include <shrQATest.h> // includes, library #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include <thrust/copy.h> //////////////////////////////////////////////////////////////////////////////// // declaration, types // Boolean typedef unsigned char Bool; enum { False = 0, True = 1 }; // 2D height field struct HeightField { int width; float* height; }; // Ray struct Ray { float3 origin; float2 dir; int length; float oneOverLength; }; //////////////////////////////////////////////////////////////////////////////// // declaration, variables // Height field texture reference texture<float, 2, hipReadModeElementType> g_HeightFieldTex; //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); __global__ void computeAngles_kernel(const Ray, float*); __global__ void computeVisibilities_kernel(const float*, const float*, int, Bool*); void lineOfSight_gold(const HeightField, const Ray, Bool*); __device__ __host__ float2 getLocation(const Ray, int); __device__ __host__ float getAngle(const Ray, float2, float); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a line-of-sight test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char** argv) { //////////////////////////////////////////////////////////////////////////// // Device initialization shrQAStart(argc, argv); // use command-line specified CUDA device, otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else hipSetDevice( cutGetMaxGflopsDeviceId() ); //////////////////////////////////////////////////////////////////////////// // Timer // Create uint timer; cutilCheckError(cutCreateTimer(&timer)); // Number of iterations to get accurate timing uint numIterations = 1; //////////////////////////////////////////////////////////////////////////// // Height field HeightField heightField; // Allocate in host memory int2 dim = make_int2(10000, 100); heightField.width = dim.x; thrust::host_vector<float> height(dim.x * dim.y); heightField.height = (float*)&height[0]; // // Fill in with an arbitrary sine surface for (int x = 0; x < dim.x; ++x) for (int y = 0; y < dim.y; ++y) { float amp = 0.1f * (x + y); float period = 2.0f + amp; *(heightField.height + dim.x * y + x) = amp * (sinf(sqrtf((float)(x * x + y * y)) * 2.0f * 3.1416f / period) + 1.0f); } // Allocate CUDA array in device memory hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); hipArray* heightFieldArray; cutilSafeCall(hipMallocArray(&heightFieldArray, &channelDesc, dim.x, dim.y)); // Initialize device memory cutilSafeCall(hipMemcpyToArray(heightFieldArray, 0, 0, heightField.height, dim.x * dim.y * sizeof(float), hipMemcpyHostToDevice)); // Set texture parameters g_HeightFieldTex.addressMode[0] = hipAddressModeClamp; g_HeightFieldTex.addressMode[1] = hipAddressModeClamp; g_HeightFieldTex.filterMode = hipFilterModePoint; g_HeightFieldTex.normalized = 0; // Bind CUDA array to texture reference cutilSafeCall(hipBindTextureToArray(g_HeightFieldTex, heightFieldArray, channelDesc)); //////////////////////////////////////////////////////////////////////////// // Ray (starts at origin and traverses the height field diagonally) Ray ray; ray.origin = make_float3(0, 0, 2.0f); int2 dir = make_int2(dim.x - 1, dim.y - 1); ray.dir = make_float2((float)dir.x, (float)dir.y); ray.length = max(abs(dir.x), abs(dir.y)); ray.oneOverLength = 1.0f / ray.length; //////////////////////////////////////////////////////////////////////////// // View angles // Allocate view angles for each point along the ray thrust::device_vector<float> d_angles(ray.length); // Allocate result of max-scan operation on the array of view angles thrust::device_vector<float> d_scannedAngles(ray.length); //////////////////////////////////////////////////////////////////////////// // Visibility results // Allocate visibility results for each point along the ray thrust::device_vector<Bool> d_visibilities(ray.length); thrust::host_vector<Bool> h_visibilities(ray.length); thrust::host_vector<Bool> h_visibilitiesRef(ray.length); //////////////////////////////////////////////////////////////////////////// // Reference solution lineOfSight_gold(heightField, ray, (Bool*)&h_visibilitiesRef[0]); //////////////////////////////////////////////////////////////////////////// // Device solution // Execution configuration dim3 block(256); dim3 grid((uint)ceil(ray.length / (double)block.x)); // Compute device solution printf("Line of sight\n"); cutStartTimer(timer); for (uint i = 0; i < numIterations; ++i) { // 
Compute view angle for each point along the ray hipLaunchKernelGGL(( computeAngles_kernel), dim3(grid), dim3(block), 0, 0, ray, thrust::raw_pointer_cast(&d_angles[0])); cutilCheckMsg("Kernel execution failed"); // Perform a max-scan operation on the array of view angles thrust::inclusive_scan(d_angles.begin(), d_angles.end(), d_scannedAngles.begin(), thrust::maximum<float>()); cutilCheckMsg("Kernel execution failed"); // Compute visibility results based on the array of view angles // and its scanned version hipLaunchKernelGGL(( computeVisibilities_kernel), dim3(grid), dim3(block), 0, 0, thrust::raw_pointer_cast(&d_angles[0]), thrust::raw_pointer_cast(&d_scannedAngles[0]), ray.length, thrust::raw_pointer_cast(&d_visibilities[0])); cutilCheckMsg("Kernel execution failed"); } cutilDeviceSynchronize(); cutStopTimer(timer); cutilCheckMsg("Kernel execution failed"); // Copy visibility results back to the host thrust::copy(d_visibilities.begin(), d_visibilities.end(), h_visibilities.begin()); // Compare device visibility results against reference results CUTBoolean res = cutCompareub(thrust::raw_pointer_cast(&h_visibilitiesRef[0]), thrust::raw_pointer_cast(&h_visibilities[0]), ray.length); printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations); cutResetTimer(timer); // Cleanup memory cutilSafeCall(hipFreeArray(heightFieldArray)); cutilDeviceReset(); shrQAFinishExit(argc, (const char **)argv, (1 == res) ? QA_PASSED : QA_FAILED); } //////////////////////////////////////////////////////////////////////////////// //! Compute view angles for each point along the ray //! @param ray ray //! @param angles view angles //////////////////////////////////////////////////////////////////////////////// __global__ void computeAngles_kernel(const Ray ray, float* angles) { uint i = blockDim.x * blockIdx.x + threadIdx.x; if (i < ray.length) { float2 location = getLocation(ray, i + 1); float height = tex2D(g_HeightFieldTex, location.x, location.y); float angle = getAngle(ray, location, height); angles[i] = angle; } } //////////////////////////////////////////////////////////////////////////////// //! Compute visibility for each point along the ray //! @param angles view angles //! @param scannedAngles max-scanned view angles //! @param numAngles number of view angles //! @param visibilities boolean array indicating the visibility of each point //! along the ray //////////////////////////////////////////////////////////////////////////////// __global__ void computeVisibilities_kernel(const float* angles, const float* scannedAngles, int numAngles, Bool* visibilities) { uint i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numAngles) visibilities[i] = scannedAngles[i] <= angles[i]; } //////////////////////////////////////////////////////////////////////////////// //! Compute reference data set //! @param heightField height field //! @param ray ray //! @param visibilities boolean array indicating the visibility of each point //! 
along the ray //////////////////////////////////////////////////////////////////////////////// void lineOfSight_gold(const HeightField heightField, const Ray ray, Bool* visibilities) { float angleMax = asinf(-1.0f); for (int i = 0; i < ray.length; ++i) { float2 location = getLocation(ray, i + 1); float height = *(heightField.height + heightField.width * (int)floorf(location.y) + (int)floorf(location.x)); float angle = getAngle(ray, location, height); if (angle > angleMax) { angleMax = angle; visibilities[i] = True; } else visibilities[i] = False; } } //////////////////////////////////////////////////////////////////////////////// //! Compute the 2D coordinates of the point located at i steps from the origin //! of the ray //! @param ray ray //! @param i integer offset along the ray //////////////////////////////////////////////////////////////////////////////// __device__ __host__ float2 getLocation(const Ray ray, int i) { float step = i * ray.oneOverLength; return make_float2(ray.origin.x, ray.origin.y) + step * ray.dir; } //////////////////////////////////////////////////////////////////////////////// //! Compute the angle of view between a 3D point and the origin of the ray //! @param ray ray //! @param location 2D coordinates of the input point //! @param height height of the input point //////////////////////////////////////////////////////////////////////////////// __device__ __host__ float getAngle(const Ray ray, float2 location, float height) { float2 dir = location - make_float2(ray.origin.x, ray.origin.y); return atanf((height - ray.origin.z) / length(dir)); }
a0cbaf47e5097858811aa54e78bda5d8932ec345.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // This sample is an implementation of a simple line-of-sight algorithm: // Given a height map and a ray originating at some observation point, // it computes all the points along the ray that are visible from the // observation point. // It is based on the description made in "Guy E. Blelloch. Vector models // for data-parallel computing. MIT Press, 1990" and uses open source CUDA // Thrust Library #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> // includes, project #include <cutil_inline.h> #include <cutil_math.h> #include <shrQATest.h> // includes, library #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include <thrust/copy.h> //////////////////////////////////////////////////////////////////////////////// // declaration, types // Boolean typedef unsigned char Bool; enum { False = 0, True = 1 }; // 2D height field struct HeightField { int width; float* height; }; // Ray struct Ray { float3 origin; float2 dir; int length; float oneOverLength; }; //////////////////////////////////////////////////////////////////////////////// // declaration, variables // Height field texture reference texture<float, 2, cudaReadModeElementType> g_HeightFieldTex; //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); __global__ void computeAngles_kernel(const Ray, float*); __global__ void computeVisibilities_kernel(const float*, const float*, int, Bool*); void lineOfSight_gold(const HeightField, const Ray, Bool*); __device__ __host__ float2 getLocation(const Ray, int); __device__ __host__ float getAngle(const Ray, float2, float); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a line-of-sight test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char** argv) { //////////////////////////////////////////////////////////////////////////// // Device initialization shrQAStart(argc, argv); // use command-line specified CUDA device, otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else cudaSetDevice( cutGetMaxGflopsDeviceId() ); //////////////////////////////////////////////////////////////////////////// // Timer // Create uint timer; cutilCheckError(cutCreateTimer(&timer)); // Number of iterations to get accurate timing uint numIterations = 1; //////////////////////////////////////////////////////////////////////////// // Height field HeightField heightField; // Allocate in host memory int2 dim = make_int2(10000, 100); heightField.width = dim.x; thrust::host_vector<float> height(dim.x * dim.y); heightField.height = (float*)&height[0]; // // Fill in with an arbitrary sine surface for (int x = 0; x < dim.x; ++x) for (int y = 0; y < dim.y; ++y) { float amp = 0.1f * (x + y); float period = 2.0f + amp; *(heightField.height + dim.x * y + x) = amp * (sinf(sqrtf((float)(x * x + y * y)) * 2.0f * 3.1416f / period) + 1.0f); } // Allocate CUDA array in device memory cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaArray* heightFieldArray; cutilSafeCall(cudaMallocArray(&heightFieldArray, &channelDesc, dim.x, dim.y)); // Initialize device memory cutilSafeCall(cudaMemcpyToArray(heightFieldArray, 0, 0, heightField.height, dim.x * dim.y * sizeof(float), cudaMemcpyHostToDevice)); // Set texture parameters g_HeightFieldTex.addressMode[0] = cudaAddressModeClamp; g_HeightFieldTex.addressMode[1] = cudaAddressModeClamp; g_HeightFieldTex.filterMode = cudaFilterModePoint; g_HeightFieldTex.normalized = 0; // Bind CUDA array to texture reference cutilSafeCall(cudaBindTextureToArray(g_HeightFieldTex, heightFieldArray, channelDesc)); //////////////////////////////////////////////////////////////////////////// // Ray (starts at origin and traverses the height field diagonally) Ray ray; ray.origin = make_float3(0, 0, 2.0f); int2 dir = make_int2(dim.x - 1, dim.y - 1); ray.dir = make_float2((float)dir.x, (float)dir.y); ray.length = max(abs(dir.x), abs(dir.y)); ray.oneOverLength = 1.0f / ray.length; //////////////////////////////////////////////////////////////////////////// // View angles // Allocate view angles for each point along the ray thrust::device_vector<float> d_angles(ray.length); // Allocate result of max-scan operation on the array of view angles thrust::device_vector<float> d_scannedAngles(ray.length); //////////////////////////////////////////////////////////////////////////// // Visibility results // Allocate visibility results for each point along the ray thrust::device_vector<Bool> d_visibilities(ray.length); thrust::host_vector<Bool> h_visibilities(ray.length); thrust::host_vector<Bool> h_visibilitiesRef(ray.length); //////////////////////////////////////////////////////////////////////////// // Reference solution lineOfSight_gold(heightField, ray, (Bool*)&h_visibilitiesRef[0]); //////////////////////////////////////////////////////////////////////////// // Device solution // Execution configuration dim3 block(256); dim3 grid((uint)ceil(ray.length / (double)block.x)); // Compute device solution printf("Line of sight\n"); cutStartTimer(timer); for (uint i = 0; i < numIterations; 
++i) { // Compute view angle for each point along the ray computeAngles_kernel<<<grid, block>>>(ray, thrust::raw_pointer_cast(&d_angles[0])); cutilCheckMsg("Kernel execution failed"); // Perform a max-scan operation on the array of view angles thrust::inclusive_scan(d_angles.begin(), d_angles.end(), d_scannedAngles.begin(), thrust::maximum<float>()); cutilCheckMsg("Kernel execution failed"); // Compute visibility results based on the array of view angles // and its scanned version computeVisibilities_kernel<<<grid, block>>>(thrust::raw_pointer_cast(&d_angles[0]), thrust::raw_pointer_cast(&d_scannedAngles[0]), ray.length, thrust::raw_pointer_cast(&d_visibilities[0])); cutilCheckMsg("Kernel execution failed"); } cutilDeviceSynchronize(); cutStopTimer(timer); cutilCheckMsg("Kernel execution failed"); // Copy visibility results back to the host thrust::copy(d_visibilities.begin(), d_visibilities.end(), h_visibilities.begin()); // Compare device visibility results against reference results CUTBoolean res = cutCompareub(thrust::raw_pointer_cast(&h_visibilitiesRef[0]), thrust::raw_pointer_cast(&h_visibilities[0]), ray.length); printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations); cutResetTimer(timer); // Cleanup memory cutilSafeCall(cudaFreeArray(heightFieldArray)); cutilDeviceReset(); shrQAFinishExit(argc, (const char **)argv, (1 == res) ? QA_PASSED : QA_FAILED); } //////////////////////////////////////////////////////////////////////////////// //! Compute view angles for each point along the ray //! @param ray ray //! @param angles view angles //////////////////////////////////////////////////////////////////////////////// __global__ void computeAngles_kernel(const Ray ray, float* angles) { uint i = blockDim.x * blockIdx.x + threadIdx.x; if (i < ray.length) { float2 location = getLocation(ray, i + 1); float height = tex2D(g_HeightFieldTex, location.x, location.y); float angle = getAngle(ray, location, height); angles[i] = angle; } } //////////////////////////////////////////////////////////////////////////////// //! Compute visibility for each point along the ray //! @param angles view angles //! @param scannedAngles max-scanned view angles //! @param numAngles number of view angles //! @param visibilities boolean array indicating the visibility of each point //! along the ray //////////////////////////////////////////////////////////////////////////////// __global__ void computeVisibilities_kernel(const float* angles, const float* scannedAngles, int numAngles, Bool* visibilities) { uint i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numAngles) visibilities[i] = scannedAngles[i] <= angles[i]; } //////////////////////////////////////////////////////////////////////////////// //! Compute reference data set //! @param heightField height field //! @param ray ray //! @param visibilities boolean array indicating the visibility of each point //! along the ray //////////////////////////////////////////////////////////////////////////////// void lineOfSight_gold(const HeightField heightField, const Ray ray, Bool* visibilities) { float angleMax = asinf(-1.0f); for (int i = 0; i < ray.length; ++i) { float2 location = getLocation(ray, i + 1); float height = *(heightField.height + heightField.width * (int)floorf(location.y) + (int)floorf(location.x)); float angle = getAngle(ray, location, height); if (angle > angleMax) { angleMax = angle; visibilities[i] = True; } else visibilities[i] = False; } } //////////////////////////////////////////////////////////////////////////////// //! 
Compute the 2D coordinates of the point located at i steps from the origin //! of the ray //! @param ray ray //! @param i integer offset along the ray //////////////////////////////////////////////////////////////////////////////// __device__ __host__ float2 getLocation(const Ray ray, int i) { float step = i * ray.oneOverLength; return make_float2(ray.origin.x, ray.origin.y) + step * ray.dir; } //////////////////////////////////////////////////////////////////////////////// //! Compute the angle of view between a 3D point and the origin of the ray //! @param ray ray //! @param location 2D coordinates of the input point //! @param height height of the input point //////////////////////////////////////////////////////////////////////////////// __device__ __host__ float getAngle(const Ray ray, float2 location, float height) { float2 dir = location - make_float2(ray.origin.x, ray.origin.y); return atanf((height - ray.origin.z) / length(dir)); }
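The visibility test in the line-of-sight sample above reduces to: a point along the ray is visible exactly when its view angle is at least as large as every angle before it, which is why an inclusive max-scan followed by the comparison scannedAngles[i] <= angles[i] works. A small self-contained Thrust sketch of that idea on host data follows; the angle values are made up for illustration only.

#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <thrust/functional.h>
#include <cstdio>

int main() {
    // Made-up view angles along a ray (radians); larger means the terrain rises faster.
    float vals[] = {0.10f, 0.30f, 0.20f, 0.50f, 0.40f, 0.60f};
    thrust::host_vector<float> angles(vals, vals + 6);

    // Running maximum of the angles seen so far (inclusive max-scan).
    thrust::host_vector<float> scanned(angles.size());
    thrust::inclusive_scan(angles.begin(), angles.end(), scanned.begin(),
                           thrust::maximum<float>());

    // A point is visible iff its own angle equals the running maximum.
    for (size_t i = 0; i < angles.size(); ++i)
        printf("angle %.2f -> %s\n", angles[i],
               scanned[i] <= angles[i] ? "visible" : "hidden");
    return 0;
}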
248d08da4b1d0c1a4f39b49a0fe8363621cd7842.hip
// !!! This is a file automatically generated by hipify!!! #include "linear_iso_elastic_kernel.hpp" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "yuzu/domain/analyses/ReducedNumericalModel.hpp" #include "yuzu/domain/elements/ElementData.hpp" #include "yuzu/domain/elements/ElementGeometry.hpp" #include "yuzu/domain/elements/FiniteElement.hpp" #include "yuzu/domain/integration/IntegrationPoint.hpp" #include "yuzu/domain/physics/InfinitesimalState.hpp" #include "yuzu/domain/physics/UpdatedPhysicalState.hpp" #include "yuzu/foundation/memory/RelativePointer.hpp" #include "yuzu/foundation/memory/pointer.hpp" #include "yuzu/foundation/blas/vector_operations.hpp" #include "yuzu/utils/kernel_utils.hpp" #include "yuzu/common/gpu.hpp" namespace afm = axis::foundation::memory; namespace ay = axis::yuzu; namespace ayda = axis::yuzu::domain::analyses; namespace ayde = axis::yuzu::domain::elements; namespace aydi = axis::yuzu::domain::integration; namespace aydp = axis::yuzu::domain::physics; namespace ayfb = axis::yuzu::foundation::blas; namespace ayfm = axis::yuzu::foundation::memory; __device__ void UpdateStress(const real *materialTensor, aydp::UpdatedPhysicalState& state, const aydp::InfinitesimalState& lastState) { for (int i = 0; i < 6; i++) { real x = 0; for (int j = 0; j < 6; j++) { x += materialTensor[6*i + j] * lastState.LastStrainIncrement()(j); } state.LastStressIncrement()(i) = x; state.Stress()(i) += x; } } __global__ void __launch_bounds__(AXIS_YUZU_MAX_THREADS_PER_BLOCK) RunLinearIsoElasticKernel( uint64 numThreadsToUse, uint64 startIndex, void *baseMemoryAddressOnGPU, void * streamPtr, uint64 elementBlockSize, ayfm::RelativePointer reducedModelPtr, real currentTime, real lastTimeIncrement, real nextTimeIncrement ) { using axis::yabsref; using axis::yabsptr; uint64 baseIndex = ay::GetBaseThreadIndex(gridDim, blockIdx, blockDim, threadIdx); if (!ay::IsActiveThread(baseIndex + startIndex, numThreadsToUse)) return; ayde::ElementData eData(baseMemoryAddressOnGPU, baseIndex, elementBlockSize); ayda::ReducedNumericalModel& model = yabsref<ayda::ReducedNumericalModel>(reducedModelPtr); // get FE geometry data uint64 elementId = eData.GetId(); ayde::FiniteElement &element = model.GetElement((size_type)elementId); ayde::ElementGeometry& g = element.Geometry(); aydp::InfinitesimalState& elementState = element.PhysicalState(); const real *materialTensor = eData.MaterialTensor(); int pointCount = g.GetIntegrationPointCount(); if (pointCount > 0) { elementState.Stress().ClearAll(); elementState.LastStressIncrement().ClearAll(); for (int pointIdx = 0; pointIdx < pointCount; ++pointIdx) { aydi::IntegrationPoint& p = g.GetIntegrationPoint(pointIdx); aydp::InfinitesimalState& pointState = p.State(); // call stress update function aydp::UpdatedPhysicalState ups(pointState); UpdateStress(materialTensor, ups, pointState); // add parcel to element mean stress ayfb::VectorSum(elementState.Stress(), 1.0, elementState.Stress(), 1.0, pointState.Stress()); ayfb::VectorSum(elementState.LastStressIncrement(), 1.0, elementState.LastStressIncrement(), 1.0, pointState.LastStressIncrement()); } // element stress is the average of all parcels elementState.Stress().Scale(1.0 / (real)pointCount); elementState.LastStressIncrement().Scale(1.0 / (real)pointCount); } else { // formulation does not use integration point; // just call stress update function directly on it aydp::UpdatedPhysicalState ups(elementState); UpdateStress(materialTensor, ups, elementState); } } extern void 
axis::domain::materials::linear_iso_commands::RunLinearIsoElasticOnGPU( uint64 numThreadsToUse, uint64 startIndex, void *baseMemoryAddressOnGPU, const axis::Dimension3D& gridDim, const axis::Dimension3D& blockDim, void * streamPtr, uint64 elementBlockSize, afm::RelativePointer& reducedModelPtr, real currentTime, real lastTimeIncrement, real nextTimeIncrement ) { dim3 grid, block; grid.x = gridDim.X; grid.y = gridDim.Y; grid.z = gridDim.Z; block.x = blockDim.X; block.y = blockDim.Y; block.z = blockDim.Z; hipLaunchKernelGGL(( RunLinearIsoElasticKernel), dim3(grid), dim3(block), 0, (hipStream_t)streamPtr, numThreadsToUse, startIndex, baseMemoryAddressOnGPU, streamPtr, elementBlockSize, reinterpret_cast<ayfm::RelativePointer&>(reducedModelPtr), currentTime, lastTimeIncrement, nextTimeIncrement); }
248d08da4b1d0c1a4f39b49a0fe8363621cd7842.cu
#include "linear_iso_elastic_kernel.hpp" #include <cuda.h> #include <cuda_runtime.h> #include "yuzu/domain/analyses/ReducedNumericalModel.hpp" #include "yuzu/domain/elements/ElementData.hpp" #include "yuzu/domain/elements/ElementGeometry.hpp" #include "yuzu/domain/elements/FiniteElement.hpp" #include "yuzu/domain/integration/IntegrationPoint.hpp" #include "yuzu/domain/physics/InfinitesimalState.hpp" #include "yuzu/domain/physics/UpdatedPhysicalState.hpp" #include "yuzu/foundation/memory/RelativePointer.hpp" #include "yuzu/foundation/memory/pointer.hpp" #include "yuzu/foundation/blas/vector_operations.hpp" #include "yuzu/utils/kernel_utils.hpp" #include "yuzu/common/gpu.hpp" namespace afm = axis::foundation::memory; namespace ay = axis::yuzu; namespace ayda = axis::yuzu::domain::analyses; namespace ayde = axis::yuzu::domain::elements; namespace aydi = axis::yuzu::domain::integration; namespace aydp = axis::yuzu::domain::physics; namespace ayfb = axis::yuzu::foundation::blas; namespace ayfm = axis::yuzu::foundation::memory; __device__ void UpdateStress(const real *materialTensor, aydp::UpdatedPhysicalState& state, const aydp::InfinitesimalState& lastState) { for (int i = 0; i < 6; i++) { real x = 0; for (int j = 0; j < 6; j++) { x += materialTensor[6*i + j] * lastState.LastStrainIncrement()(j); } state.LastStressIncrement()(i) = x; state.Stress()(i) += x; } } __global__ void __launch_bounds__(AXIS_YUZU_MAX_THREADS_PER_BLOCK) RunLinearIsoElasticKernel( uint64 numThreadsToUse, uint64 startIndex, void *baseMemoryAddressOnGPU, void * streamPtr, uint64 elementBlockSize, ayfm::RelativePointer reducedModelPtr, real currentTime, real lastTimeIncrement, real nextTimeIncrement ) { using axis::yabsref; using axis::yabsptr; uint64 baseIndex = ay::GetBaseThreadIndex(gridDim, blockIdx, blockDim, threadIdx); if (!ay::IsActiveThread(baseIndex + startIndex, numThreadsToUse)) return; ayde::ElementData eData(baseMemoryAddressOnGPU, baseIndex, elementBlockSize); ayda::ReducedNumericalModel& model = yabsref<ayda::ReducedNumericalModel>(reducedModelPtr); // get FE geometry data uint64 elementId = eData.GetId(); ayde::FiniteElement &element = model.GetElement((size_type)elementId); ayde::ElementGeometry& g = element.Geometry(); aydp::InfinitesimalState& elementState = element.PhysicalState(); const real *materialTensor = eData.MaterialTensor(); int pointCount = g.GetIntegrationPointCount(); if (pointCount > 0) { elementState.Stress().ClearAll(); elementState.LastStressIncrement().ClearAll(); for (int pointIdx = 0; pointIdx < pointCount; ++pointIdx) { aydi::IntegrationPoint& p = g.GetIntegrationPoint(pointIdx); aydp::InfinitesimalState& pointState = p.State(); // call stress update function aydp::UpdatedPhysicalState ups(pointState); UpdateStress(materialTensor, ups, pointState); // add parcel to element mean stress ayfb::VectorSum(elementState.Stress(), 1.0, elementState.Stress(), 1.0, pointState.Stress()); ayfb::VectorSum(elementState.LastStressIncrement(), 1.0, elementState.LastStressIncrement(), 1.0, pointState.LastStressIncrement()); } // element stress is the average of all parcels elementState.Stress().Scale(1.0 / (real)pointCount); elementState.LastStressIncrement().Scale(1.0 / (real)pointCount); } else { // formulation does not use integration point; // just call stress update function directly on it aydp::UpdatedPhysicalState ups(elementState); UpdateStress(materialTensor, ups, elementState); } } extern void axis::domain::materials::linear_iso_commands::RunLinearIsoElasticOnGPU( uint64 numThreadsToUse, 
uint64 startIndex, void *baseMemoryAddressOnGPU, const axis::Dimension3D& gridDim, const axis::Dimension3D& blockDim, void * streamPtr, uint64 elementBlockSize, afm::RelativePointer& reducedModelPtr, real currentTime, real lastTimeIncrement, real nextTimeIncrement ) { dim3 grid, block; grid.x = gridDim.X; grid.y = gridDim.Y; grid.z = gridDim.Z; block.x = blockDim.X; block.y = blockDim.Y; block.z = blockDim.Z; RunLinearIsoElasticKernel<<<grid, block, 0, (cudaStream_t)streamPtr>>>( numThreadsToUse, startIndex, baseMemoryAddressOnGPU, streamPtr, elementBlockSize, reinterpret_cast<ayfm::RelativePointer&>(reducedModelPtr), currentTime, lastTimeIncrement, nextTimeIncrement); }
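UpdateStress in the pair above is, at its core, a 6x6 material tensor contracted with the last strain increment, with the result accumulated into the stress vector. A plain host-side reference of that same contraction is sketched below for checking a single integration point; it uses hypothetical raw arrays instead of the axis::yuzu state classes, and the tensor and strain values are invented for illustration.

#include <cstdio>

// Reference version of the contraction performed by UpdateStress:
//   dSigma_i = sum_j C[i][j] * dEps_j,  then  sigma_i += dSigma_i
void update_stress_ref(const double C[6][6], const double dEps[6],
                       double dSigma[6], double sigma[6]) {
    for (int i = 0; i < 6; ++i) {
        double x = 0.0;
        for (int j = 0; j < 6; ++j)
            x += C[i][j] * dEps[j];   // same row-major access as materialTensor[6*i + j]
        dSigma[i] = x;
        sigma[i] += x;
    }
}

int main() {
    // Illustrative isotropic-like tensor and strain increment (values made up).
    double C[6][6] = {};
    for (int i = 0; i < 6; ++i) C[i][i] = (i < 3) ? 200.0 : 80.0;
    C[0][1] = C[1][0] = C[0][2] = C[2][0] = C[1][2] = C[2][1] = 100.0;
    double dEps[6] = {1e-3, 0, 0, 0, 0, 0};
    double sigma[6] = {}, dSigma[6] = {};
    update_stress_ref(C, dEps, dSigma, sigma);
    for (int i = 0; i < 6; ++i) printf("sigma[%d] = %g\n", i, sigma[i]);
    return 0;
}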
505e49ac66487d2259cee6ebb800964bd40fe4bc.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> namespace { template <typename scalar_t> __device__ __forceinline__ scalar_t sigmoid(scalar_t z) { return 1.0 / (1.0 + exp(-z)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) { const auto s = sigmoid(z); return (1.0 - s) * s; } template <typename scalar_t> __device__ __forceinline__ scalar_t d_tanh(scalar_t z) { const auto t = tanh(z); return 1 - (t * t); } template <typename scalar_t> __device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) { return fmax(0.0, z) + fmin(0.0, alpha * (exp(z) - 1.0)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) { const auto e = exp(z); const auto d_relu = z < 0.0 ? 0.0 : 1.0; return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0); } template <typename scalar_t> __global__ void lltm_cuda_forward_kernel( const scalar_t* __restrict__ gates, const scalar_t* __restrict__ old_cell, scalar_t* __restrict__ new_h, scalar_t* __restrict__ new_cell, scalar_t* __restrict__ input_gate, scalar_t* __restrict__ output_gate, scalar_t* __restrict__ candidate_cell, size_t state_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; const int index = blockIdx.y * state_size + column; const int gates_row = blockIdx.y * (state_size * 3); if (column < state_size) { input_gate[index] = sigmoid(gates[gates_row + column]); output_gate[index] = sigmoid(gates[gates_row + state_size + column]); candidate_cell[index] = elu(gates[gates_row + 2 * state_size + column]); new_cell[index] = old_cell[index] + candidate_cell[index] * input_gate[index]; new_h[index] = tanh(new_cell[index]) * output_gate[index]; } } template <typename scalar_t> __global__ void lltm_cuda_backward_kernel( scalar_t* __restrict__ d_old_cell, scalar_t* __restrict__ d_gates, const scalar_t* __restrict__ grad_h, const scalar_t* __restrict__ grad_cell, const scalar_t* __restrict__ new_cell, const scalar_t* __restrict__ input_gate, const scalar_t* __restrict__ output_gate, const scalar_t* __restrict__ candidate_cell, const scalar_t* __restrict__ gate_weights, size_t state_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; const int index = blockIdx.y * state_size + column; const int gates_row = blockIdx.y * (state_size * 3); if (column < state_size) { const auto d_output_gate = tanh(new_cell[index]) * grad_h[index]; const auto d_tanh_new_cell = output_gate[index] * grad_h[index]; const auto d_new_cell = d_tanh(new_cell[index]) * d_tanh_new_cell + grad_cell[index]; d_old_cell[index] = d_new_cell; const auto d_candidate_cell = input_gate[index] * d_new_cell; const auto d_input_gate = candidate_cell[index] * d_new_cell; const auto input_gate_index = gates_row + column; const auto output_gate_index = gates_row + state_size + column; const auto candidate_cell_index = gates_row + 2 * state_size + column; d_gates[input_gate_index] = d_input_gate * d_sigmoid(gate_weights[input_gate_index]); d_gates[output_gate_index] = d_output_gate * d_sigmoid(gate_weights[output_gate_index]); d_gates[candidate_cell_index] = d_candidate_cell * d_elu(gate_weights[candidate_cell_index]); } } } // namespace std::vector<at::Tensor> lltm_cuda_forward( at::Tensor input, at::Tensor weights, at::Tensor bias, at::Tensor old_h, at::Tensor old_cell) { auto X = at::cat({old_h, input}, /*dim=*/1); auto gates = at::addmm(bias, X, 
weights.transpose(0, 1)); const auto batch_size = old_cell.size(0); const auto state_size = old_cell.size(1); auto new_h = at::zeros_like(old_cell); auto new_cell = at::zeros_like(old_cell); auto input_gate = at::zeros_like(old_cell); auto output_gate = at::zeros_like(old_cell); auto candidate_cell = at::zeros_like(old_cell); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] { hipLaunchKernelGGL(( lltm_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, gates.data<scalar_t>(), old_cell.data<scalar_t>(), new_h.data<scalar_t>(), new_cell.data<scalar_t>(), input_gate.data<scalar_t>(), output_gate.data<scalar_t>(), candidate_cell.data<scalar_t>(), state_size); })); return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates}; } std::vector<at::Tensor> lltm_cuda_backward( at::Tensor grad_h, at::Tensor grad_cell, at::Tensor new_cell, at::Tensor input_gate, at::Tensor output_gate, at::Tensor candidate_cell, at::Tensor X, at::Tensor gate_weights, at::Tensor weights) { auto d_old_cell = at::zeros_like(new_cell); auto d_gates = at::zeros_like(gate_weights); const auto batch_size = new_cell.size(0); const auto state_size = new_cell.size(1); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] { hipLaunchKernelGGL(( lltm_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, d_old_cell.data<scalar_t>(), d_gates.data<scalar_t>(), grad_h.data<scalar_t>(), grad_cell.data<scalar_t>(), new_cell.data<scalar_t>(), input_gate.data<scalar_t>(), output_gate.data<scalar_t>(), candidate_cell.data<scalar_t>(), gate_weights.data<scalar_t>(), state_size); })); auto d_weights = d_gates.t().mm(X); auto d_bias = d_gates.sum(/*dim=*/0, /*keepdim=*/true); auto d_X = d_gates.mm(weights); auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size); auto d_input = d_X.slice(/*dim=*/1, state_size); return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates}; }
505e49ac66487d2259cee6ebb800964bd40fe4bc.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> namespace { template <typename scalar_t> __device__ __forceinline__ scalar_t sigmoid(scalar_t z) { return 1.0 / (1.0 + exp(-z)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) { const auto s = sigmoid(z); return (1.0 - s) * s; } template <typename scalar_t> __device__ __forceinline__ scalar_t d_tanh(scalar_t z) { const auto t = tanh(z); return 1 - (t * t); } template <typename scalar_t> __device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) { return fmax(0.0, z) + fmin(0.0, alpha * (exp(z) - 1.0)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) { const auto e = exp(z); const auto d_relu = z < 0.0 ? 0.0 : 1.0; return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0); } template <typename scalar_t> __global__ void lltm_cuda_forward_kernel( const scalar_t* __restrict__ gates, const scalar_t* __restrict__ old_cell, scalar_t* __restrict__ new_h, scalar_t* __restrict__ new_cell, scalar_t* __restrict__ input_gate, scalar_t* __restrict__ output_gate, scalar_t* __restrict__ candidate_cell, size_t state_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; const int index = blockIdx.y * state_size + column; const int gates_row = blockIdx.y * (state_size * 3); if (column < state_size) { input_gate[index] = sigmoid(gates[gates_row + column]); output_gate[index] = sigmoid(gates[gates_row + state_size + column]); candidate_cell[index] = elu(gates[gates_row + 2 * state_size + column]); new_cell[index] = old_cell[index] + candidate_cell[index] * input_gate[index]; new_h[index] = tanh(new_cell[index]) * output_gate[index]; } } template <typename scalar_t> __global__ void lltm_cuda_backward_kernel( scalar_t* __restrict__ d_old_cell, scalar_t* __restrict__ d_gates, const scalar_t* __restrict__ grad_h, const scalar_t* __restrict__ grad_cell, const scalar_t* __restrict__ new_cell, const scalar_t* __restrict__ input_gate, const scalar_t* __restrict__ output_gate, const scalar_t* __restrict__ candidate_cell, const scalar_t* __restrict__ gate_weights, size_t state_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; const int index = blockIdx.y * state_size + column; const int gates_row = blockIdx.y * (state_size * 3); if (column < state_size) { const auto d_output_gate = tanh(new_cell[index]) * grad_h[index]; const auto d_tanh_new_cell = output_gate[index] * grad_h[index]; const auto d_new_cell = d_tanh(new_cell[index]) * d_tanh_new_cell + grad_cell[index]; d_old_cell[index] = d_new_cell; const auto d_candidate_cell = input_gate[index] * d_new_cell; const auto d_input_gate = candidate_cell[index] * d_new_cell; const auto input_gate_index = gates_row + column; const auto output_gate_index = gates_row + state_size + column; const auto candidate_cell_index = gates_row + 2 * state_size + column; d_gates[input_gate_index] = d_input_gate * d_sigmoid(gate_weights[input_gate_index]); d_gates[output_gate_index] = d_output_gate * d_sigmoid(gate_weights[output_gate_index]); d_gates[candidate_cell_index] = d_candidate_cell * d_elu(gate_weights[candidate_cell_index]); } } } // namespace std::vector<at::Tensor> lltm_cuda_forward( at::Tensor input, at::Tensor weights, at::Tensor bias, at::Tensor old_h, at::Tensor old_cell) { auto X = at::cat({old_h, input}, /*dim=*/1); auto gates = at::addmm(bias, X, weights.transpose(0, 1)); const auto batch_size = old_cell.size(0); const auto state_size = 
old_cell.size(1); auto new_h = at::zeros_like(old_cell); auto new_cell = at::zeros_like(old_cell); auto input_gate = at::zeros_like(old_cell); auto output_gate = at::zeros_like(old_cell); auto candidate_cell = at::zeros_like(old_cell); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] { lltm_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( gates.data<scalar_t>(), old_cell.data<scalar_t>(), new_h.data<scalar_t>(), new_cell.data<scalar_t>(), input_gate.data<scalar_t>(), output_gate.data<scalar_t>(), candidate_cell.data<scalar_t>(), state_size); })); return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates}; } std::vector<at::Tensor> lltm_cuda_backward( at::Tensor grad_h, at::Tensor grad_cell, at::Tensor new_cell, at::Tensor input_gate, at::Tensor output_gate, at::Tensor candidate_cell, at::Tensor X, at::Tensor gate_weights, at::Tensor weights) { auto d_old_cell = at::zeros_like(new_cell); auto d_gates = at::zeros_like(gate_weights); const auto batch_size = new_cell.size(0); const auto state_size = new_cell.size(1); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] { lltm_cuda_backward_kernel<scalar_t><<<blocks, threads>>>( d_old_cell.data<scalar_t>(), d_gates.data<scalar_t>(), grad_h.data<scalar_t>(), grad_cell.data<scalar_t>(), new_cell.data<scalar_t>(), input_gate.data<scalar_t>(), output_gate.data<scalar_t>(), candidate_cell.data<scalar_t>(), gate_weights.data<scalar_t>(), state_size); })); auto d_weights = d_gates.t().mm(X); auto d_bias = d_gates.sum(/*dim=*/0, /*keepdim=*/true); auto d_X = d_gates.mm(weights); auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size); auto d_input = d_X.slice(/*dim=*/1, state_size); return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates}; }
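The lltm_cuda_forward/lltm_cuda_backward entry points above follow the PyTorch C++/CUDA extension pattern and are normally reached from Python through a thin binding file. A minimal sketch of such a binding is given below; the forward declarations are copied from the signatures above, while the choice to bind the CUDA functions directly (rather than through input-checking wrappers) and the exported names "forward"/"backward" are assumptions that may differ from the actual project layout.

#include <torch/extension.h>
#include <vector>

// Implemented in the .cu file above (assumed to be compiled into the same extension).
std::vector<at::Tensor> lltm_cuda_forward(
    at::Tensor input, at::Tensor weights, at::Tensor bias,
    at::Tensor old_h, at::Tensor old_cell);

std::vector<at::Tensor> lltm_cuda_backward(
    at::Tensor grad_h, at::Tensor grad_cell, at::Tensor new_cell,
    at::Tensor input_gate, at::Tensor output_gate, at::Tensor candidate_cell,
    at::Tensor X, at::Tensor gate_weights, at::Tensor weights);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &lltm_cuda_forward, "LLTM forward (CUDA)");
  m.def("backward", &lltm_cuda_backward, "LLTM backward (CUDA)");
}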
b7bef76ad39b26e586c6e62f3f45defa4bb39ec0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_z #define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)] #define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)] #define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b) #define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b) //============================================================ #define ldb m #define lda m #define ldc m // every multiprocessor handles one BCSR-block __global__ void zbcsrlupivloc_kernel( int size_b, int kblocks, double **A, magma_int_t *ipiv) { if( blockIdx.x < kblocks ) { if(threadIdx.x < size_b ){ for( int i=0; i<size_b; i++){ int dst = ipiv[i]-1; if( dst != i ){ double *A1 = A[blockIdx.x]+threadIdx.x*size_b+i; double *A2 = A[blockIdx.x]+threadIdx.x*size_b+dst; double tmp = *A2; *A2 = *A1; *A1 = tmp; } } } } } /** Purpose ------- For a Block-CSR ILU factorization, this routine updates all blocks in the trailing matrix. Arguments --------- @param[in] size_b magma_int_t blocksize in BCSR @param[in] kblocks magma_int_t number of blocks @param[in] dA magmaDoubleComplex_ptr * matrix in BCSR @param[in] ipiv magmaInt_ptr array containing pivots @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbcsrlupivloc( magma_int_t size_b, magma_int_t kblocks, magmaDoubleComplex_ptr *dA, magmaInt_ptr ipiv, magma_queue_t queue ) { #if defined(PRECISION_d) dim3 threads( 64, 1 ); dim3 grid(kblocks, 1, 1); hipLaunchKernelGGL(( zbcsrlupivloc_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , size_b, kblocks, dA, ipiv ); #endif return MAGMA_SUCCESS; }
b7bef76ad39b26e586c6e62f3f45defa4bb39ec0.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_z #define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)] #define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)] #define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b) #define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b) //============================================================ #define ldb m #define lda m #define ldc m // every multiprocessor handles one BCSR-block __global__ void zbcsrlupivloc_kernel( int size_b, int kblocks, double **A, magma_int_t *ipiv) { if( blockIdx.x < kblocks ) { if(threadIdx.x < size_b ){ for( int i=0; i<size_b; i++){ int dst = ipiv[i]-1; if( dst != i ){ double *A1 = A[blockIdx.x]+threadIdx.x*size_b+i; double *A2 = A[blockIdx.x]+threadIdx.x*size_b+dst; double tmp = *A2; *A2 = *A1; *A1 = tmp; } } } } } /** Purpose ------- For a Block-CSR ILU factorization, this routine updates all blocks in the trailing matrix. Arguments --------- @param[in] size_b magma_int_t blocksize in BCSR @param[in] kblocks magma_int_t number of blocks @param[in] dA magmaDoubleComplex_ptr * matrix in BCSR @param[in] ipiv magmaInt_ptr array containing pivots @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbcsrlupivloc( magma_int_t size_b, magma_int_t kblocks, magmaDoubleComplex_ptr *dA, magmaInt_ptr ipiv, magma_queue_t queue ) { #if defined(PRECISION_d) dim3 threads( 64, 1 ); dim3 grid(kblocks, 1, 1); zbcsrlupivloc_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( size_b, kblocks, dA, ipiv ); #endif return MAGMA_SUCCESS; }
1d558d5f7609ee3f87a98a93a612d8eb2ae07b65.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> #include <float.h> #include "hip/hip_complex.h" #include "time.h" #include "hip/hip_runtime.h" #include <iostream> __global__ void test(float loop, float *out) { register float a=1.0f; register float b=1.0f; register float c=1.0f; register float d=1.0f; register float e=1.0f; register float f=1.0f; register float g=1.0f; register float h=1.0f; for (float x=0;x<loop;x++) { a+=x*loop; b+=x*loop; c+=x*loop; d+=x*loop; e+=x*loop; f+=x*loop; g+=x*loop; h+=x*loop; a+=x*loop; b+=x*loop; c+=x*loop; d+=x*loop; e+=x*loop; f+=x*loop; g+=x*loop; h+=x*loop; a+=x*loop; b+=x*loop; c+=x*loop; d+=x*loop; e+=x*loop; f+=x*loop; g+=x*loop; h+=x*loop; a+=x*loop; b+=x*loop; c+=x*loop; d+=x*loop; e+=x*loop; f+=x*loop; g+=x*loop; h+=x*loop; a+=x*loop; b+=x*loop; c+=x*loop; d+=x*loop; e+=x*loop; f+=x*loop; g+=x*loop; h+=x*loop; } if (out!=NULL) *out=a+b+c+d+e+f+g+h; } int main(int argc, char *argv[]) { float timestamp; hipEvent_t event_start,event_stop; // Initialise hipDeviceReset(); hipSetDevice(0); hipDeviceSetCacheConfig(hipFuncCachePreferShared); // Allocate and generate buffers hipEventCreate(&event_start); hipEventCreate(&event_stop); hipEventRecord(event_start, 0); dim3 threadsPerBlock; dim3 blocks; threadsPerBlock.x=32; threadsPerBlock.y=32; threadsPerBlock.z=1; blocks.x=1; blocks.y=1000; blocks.z=1; hipLaunchKernelGGL(( test), dim3(blocks),dim3(threadsPerBlock),0, 0, 1000,NULL); hipEventRecord(event_stop, 0); hipEventSynchronize(event_stop); hipEventElapsedTime(&timestamp, event_start, event_stop); printf("Calculated in %f\n", timestamp); double mflops = (1000*1024*30*80*(1.0e-6))/timestamp; std::cout << "MegaFlops: " << mflops << std::endl; system("pause"); }
1d558d5f7609ee3f87a98a93a612d8eb2ae07b65.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> #include <float.h> #include "cuComplex.h" #include "time.h" #include "cuda_runtime.h" #include <iostream> __global__ void test(float loop, float *out) { register float a=1.0f; register float b=1.0f; register float c=1.0f; register float d=1.0f; register float e=1.0f; register float f=1.0f; register float g=1.0f; register float h=1.0f; for (float x=0;x<loop;x++) { a+=x*loop; b+=x*loop; c+=x*loop; d+=x*loop; e+=x*loop; f+=x*loop; g+=x*loop; h+=x*loop; a+=x*loop; b+=x*loop; c+=x*loop; d+=x*loop; e+=x*loop; f+=x*loop; g+=x*loop; h+=x*loop; a+=x*loop; b+=x*loop; c+=x*loop; d+=x*loop; e+=x*loop; f+=x*loop; g+=x*loop; h+=x*loop; a+=x*loop; b+=x*loop; c+=x*loop; d+=x*loop; e+=x*loop; f+=x*loop; g+=x*loop; h+=x*loop; a+=x*loop; b+=x*loop; c+=x*loop; d+=x*loop; e+=x*loop; f+=x*loop; g+=x*loop; h+=x*loop; } if (out!=NULL) *out=a+b+c+d+e+f+g+h; } int main(int argc, char *argv[]) { float timestamp; cudaEvent_t event_start,event_stop; // Initialise cudaDeviceReset(); cudaSetDevice(0); cudaThreadSetCacheConfig(cudaFuncCachePreferShared); // Allocate and generate buffers cudaEventCreate(&event_start); cudaEventCreate(&event_stop); cudaEventRecord(event_start, 0); dim3 threadsPerBlock; dim3 blocks; threadsPerBlock.x=32; threadsPerBlock.y=32; threadsPerBlock.z=1; blocks.x=1; blocks.y=1000; blocks.z=1; test<<<blocks,threadsPerBlock,0>>>(1000,NULL); cudaEventRecord(event_stop, 0); cudaEventSynchronize(event_stop); cudaEventElapsedTime(&timestamp, event_start, event_stop); printf("Calculated in %f\n", timestamp); double mflops = (1000*1024*30*80*(1.0e-6))/timestamp; std::cout << "MegaFlops: " << mflops << std::endl; system("pause"); }
cd8058b5d17b026e208f6efca33ec8c0f1b77be0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define PRECISION_z #define BLOCKSIZE 256 __global__ void magma_zk_testLocking(unsigned int* locks, int n) { int id = threadIdx.x % n; bool leaveLoop = false; while (!leaveLoop) { if (atomicExch(&(locks[id]), 1u) == 0u) { //critical section leaveLoop = true; atomicExch(&(locks[id]),0u); } } } /* __global__ void magma_zbajac_csr_o_ls_kernel(int localiters, int n, int matrices, int overlap, magma_z_matrix *D, magma_z_matrix *R, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { // int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; // int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; magmaDoubleComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; //valR = R[ (1+blockIdx.x-1)%matrices ].dval; //colR = R[ (1+blockIdx.x-1)%matrices ].dcol; //rowR = R[ (1+blockIdx.x-1)%matrices ].drow; //valD = D[ (1+blockIdx.x-1)%matrices ].dval; //colD = D[ (1+blockIdx.x-1)%matrices ].dcol; //rowD = D[ (1+blockIdx.x-1)%matrices ].drow; if (blockIdx.x%2 == 1) { valR = R[0].dval; valD = D[0].dval; colR = R[0].dcol; rowR = R[0].drow; colD = D[0].dcol; rowD = D[0].drow; } else { valR = R[1].dval; valD = D[1].dval; colR = R[1].dcol; rowR = R[1].drow; colD = D[1].dcol; rowD = D[1].drow; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; printf("bdx:%d idx:%d start:%d end:%d\n", blockIdx.x, threadIdx.x, start, end); #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for (i = start; i < end; i++) v += valR[i] * x[ colR[i] ]; start = rowD[index]; end = rowD[index+1]; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations local_x[threadIdx.x] = x[index]; //+ ( v - tmp); // / (valD[start]); __syncthreads(); #pragma unroll for (j = 0; j < localiters-1; j++) { tmp = zero; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if (threadIdx.x > overlap) { // RAS x[index] = local_x[threadIdx.x]; } } } */ __global__ void magma_zbajac_csr_o_ls_kernel1(int localiters, int n, int matrices, int overlap, magmaDoubleComplex * valD, magma_index_t * rowD, magma_index_t * colD, magmaDoubleComplex * valR, magma_index_t * rowR, magma_index_t * colR, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { int inddiag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; //bool leaveLoop = false; if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = 
rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_zbajac_csr_o_ls_kernel2(int localiters, int n, int matrices, int overlap, magmaDoubleComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaDoubleComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaDoubleComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaDoubleComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; //bool leaveLoop = false; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; magmaDoubleComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if (blockIdx.x%matrices == 0) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if (blockIdx.x%matrices == 1) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_zbajac_csr_o_ls_kernel4(int localiters, int n, int matrices, int overlap, magmaDoubleComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaDoubleComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaDoubleComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaDoubleComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, magmaDoubleComplex * valD2, magma_index_t * rowD2, magma_index_t * colD2, magmaDoubleComplex * valR2, magma_index_t * rowR2, magma_index_t * colR2, magmaDoubleComplex * valD3, magma_index_t * rowD3, magma_index_t * colD3, magmaDoubleComplex * valR3, magma_index_t * rowR3, magma_index_t * colR3, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; //bool leaveLoop = false; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; magmaDoubleComplex 
*valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==1 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==2 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==3 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_zbajac_csr_o_ls_kernel8(int localiters, int n, int matrices, int overlap, magmaDoubleComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaDoubleComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaDoubleComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaDoubleComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, magmaDoubleComplex * valD2, magma_index_t * rowD2, magma_index_t * colD2, magmaDoubleComplex * valR2, magma_index_t * rowR2, magma_index_t * colR2, magmaDoubleComplex * valD3, magma_index_t * rowD3, magma_index_t * colD3, magmaDoubleComplex * valR3, magma_index_t * rowR3, magma_index_t * colR3, magmaDoubleComplex * valD4, magma_index_t * rowD4, magma_index_t * colD4, magmaDoubleComplex * valR4, magma_index_t * rowR4, magma_index_t * colR4, magmaDoubleComplex * valD5, magma_index_t * rowD5, magma_index_t * colD5, magmaDoubleComplex * valR5, magma_index_t * rowR5, magma_index_t * colR5, magmaDoubleComplex * valD6, magma_index_t * rowD6, magma_index_t * colD6, magmaDoubleComplex * valR6, magma_index_t * rowR6, magma_index_t * colR6, magmaDoubleComplex * valD7, magma_index_t * rowD7, magma_index_t * colD7, magmaDoubleComplex * valR7, magma_index_t * rowR7, magma_index_t * colR7, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; magmaDoubleComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if( blockIdx.x%matrices==0 ){ valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }else if ( blockIdx.x%matrices==1 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }else if ( blockIdx.x%matrices==2 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }else if ( blockIdx.x%matrices==3 ) { valR = valR4; valD = valD4; colR = colR4; rowR = 
rowR4; colD = colD4; rowD = rowD4; }else if ( blockIdx.x%matrices==4 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==5 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==6 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==7 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_zbajac_csr_o_ls_kernel16(int localiters, int n, int matrices, int overlap, magmaDoubleComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaDoubleComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaDoubleComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaDoubleComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaDoubleComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaDoubleComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaDoubleComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaDoubleComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaDoubleComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaDoubleComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaDoubleComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaDoubleComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaDoubleComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaDoubleComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaDoubleComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaDoubleComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaDoubleComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaDoubleComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaDoubleComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaDoubleComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaDoubleComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaDoubleComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaDoubleComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaDoubleComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaDoubleComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaDoubleComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaDoubleComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaDoubleComplex *valR13, magma_index_t 
*rowR13, magma_index_t *colR13, magmaDoubleComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, magmaDoubleComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaDoubleComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaDoubleComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; magmaDoubleComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==1 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==2 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==3 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==4 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==5 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==6 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==7 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==8 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==9 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==10 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==11 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==12 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==13 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==14 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==15 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write 
back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_zbajac_csr_o_ls_kernel32(int localiters, int n, int matrices, int overlap, magmaDoubleComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaDoubleComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaDoubleComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaDoubleComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaDoubleComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaDoubleComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaDoubleComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaDoubleComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaDoubleComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaDoubleComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaDoubleComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaDoubleComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaDoubleComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaDoubleComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaDoubleComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaDoubleComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaDoubleComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaDoubleComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaDoubleComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaDoubleComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaDoubleComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaDoubleComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaDoubleComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaDoubleComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaDoubleComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaDoubleComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaDoubleComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaDoubleComplex *valR13, magma_index_t *rowR13, magma_index_t *colR13, magmaDoubleComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, magmaDoubleComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaDoubleComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaDoubleComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, magmaDoubleComplex *valD16, magma_index_t *rowD16, magma_index_t *colD16, magmaDoubleComplex *valR16, magma_index_t *rowR16, magma_index_t *colR16, magmaDoubleComplex *valD17, magma_index_t *rowD17, magma_index_t *colD17, magmaDoubleComplex *valR17, magma_index_t *rowR17, magma_index_t *colR17, magmaDoubleComplex *valD18, magma_index_t *rowD18, magma_index_t *colD18, magmaDoubleComplex *valR18, magma_index_t *rowR18, magma_index_t *colR18, magmaDoubleComplex *valD19, magma_index_t *rowD19, magma_index_t *colD19, magmaDoubleComplex *valR19, magma_index_t *rowR19, magma_index_t *colR19, magmaDoubleComplex *valD20, magma_index_t *rowD20, magma_index_t *colD20, magmaDoubleComplex *valR20, magma_index_t *rowR20, magma_index_t *colR20, magmaDoubleComplex *valD21, magma_index_t *rowD21, magma_index_t *colD21, magmaDoubleComplex *valR21, magma_index_t *rowR21, magma_index_t *colR21, magmaDoubleComplex *valD22, magma_index_t *rowD22, magma_index_t *colD22, magmaDoubleComplex *valR22, magma_index_t *rowR22, 
magma_index_t *colR22, magmaDoubleComplex *valD23, magma_index_t *rowD23, magma_index_t *colD23, magmaDoubleComplex *valR23, magma_index_t *rowR23, magma_index_t *colR23, magmaDoubleComplex *valD24, magma_index_t *rowD24, magma_index_t *colD24, magmaDoubleComplex *valR24, magma_index_t *rowR24, magma_index_t *colR24, magmaDoubleComplex *valD25, magma_index_t *rowD25, magma_index_t *colD25, magmaDoubleComplex *valR25, magma_index_t *rowR25, magma_index_t *colR25, magmaDoubleComplex *valD26, magma_index_t *rowD26, magma_index_t *colD26, magmaDoubleComplex *valR26, magma_index_t *rowR26, magma_index_t *colR26, magmaDoubleComplex *valD27, magma_index_t *rowD27, magma_index_t *colD27, magmaDoubleComplex *valR27, magma_index_t *rowR27, magma_index_t *colR27, magmaDoubleComplex *valD28, magma_index_t *rowD28, magma_index_t *colD28, magmaDoubleComplex *valR28, magma_index_t *rowR28, magma_index_t *colR28, magmaDoubleComplex *valD29, magma_index_t *rowD29, magma_index_t *colD29, magmaDoubleComplex *valR29, magma_index_t *rowR29, magma_index_t *colR29, magmaDoubleComplex *valD30, magma_index_t *rowD30, magma_index_t *colD30, magmaDoubleComplex *valR30, magma_index_t *rowR30, magma_index_t *colR30, magmaDoubleComplex *valD31, magma_index_t *rowD31, magma_index_t *colD31, magmaDoubleComplex *valR31, magma_index_t *rowR31, magma_index_t *colR31, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; magmaDoubleComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==1 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==2 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==3 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==4 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==5 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==6 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==7 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==8 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==9 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==10 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==11 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==12 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==13 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } 
else if ( blockIdx.x%matrices==14 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==15 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==16 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==17 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==18 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==19 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==20 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==21 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==22 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==23 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==24 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==25 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==26 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==27 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==28 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==29 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==30 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==31 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_zbajac_csr_o_ls_kernel64(int localiters, int n, int matrices, int overlap, magmaDoubleComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaDoubleComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaDoubleComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaDoubleComplex *valR1 , magma_index_t *rowR1 , 
magma_index_t *colR1 , magmaDoubleComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaDoubleComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaDoubleComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaDoubleComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaDoubleComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaDoubleComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaDoubleComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaDoubleComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaDoubleComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaDoubleComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaDoubleComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaDoubleComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaDoubleComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaDoubleComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaDoubleComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaDoubleComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaDoubleComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaDoubleComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaDoubleComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaDoubleComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaDoubleComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaDoubleComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaDoubleComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaDoubleComplex *valR13, magma_index_t *rowR13, magma_index_t *colR13, magmaDoubleComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, magmaDoubleComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaDoubleComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaDoubleComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, magmaDoubleComplex *valD16, magma_index_t *rowD16, magma_index_t *colD16, magmaDoubleComplex *valR16, magma_index_t *rowR16, magma_index_t *colR16, magmaDoubleComplex *valD17, magma_index_t *rowD17, magma_index_t *colD17, magmaDoubleComplex *valR17, magma_index_t *rowR17, magma_index_t *colR17, magmaDoubleComplex *valD18, magma_index_t *rowD18, magma_index_t *colD18, magmaDoubleComplex *valR18, magma_index_t *rowR18, magma_index_t *colR18, magmaDoubleComplex *valD19, magma_index_t *rowD19, magma_index_t *colD19, magmaDoubleComplex *valR19, magma_index_t *rowR19, magma_index_t *colR19, magmaDoubleComplex *valD20, magma_index_t *rowD20, magma_index_t *colD20, magmaDoubleComplex *valR20, magma_index_t *rowR20, magma_index_t *colR20, magmaDoubleComplex *valD21, magma_index_t *rowD21, magma_index_t *colD21, magmaDoubleComplex *valR21, magma_index_t *rowR21, magma_index_t *colR21, magmaDoubleComplex *valD22, magma_index_t *rowD22, magma_index_t *colD22, magmaDoubleComplex *valR22, magma_index_t *rowR22, magma_index_t *colR22, magmaDoubleComplex *valD23, magma_index_t *rowD23, magma_index_t *colD23, magmaDoubleComplex *valR23, magma_index_t *rowR23, magma_index_t *colR23, magmaDoubleComplex *valD24, magma_index_t *rowD24, magma_index_t *colD24, magmaDoubleComplex *valR24, magma_index_t *rowR24, magma_index_t *colR24, magmaDoubleComplex *valD25, magma_index_t *rowD25, magma_index_t *colD25, magmaDoubleComplex *valR25, magma_index_t *rowR25, 
magma_index_t *colR25, magmaDoubleComplex *valD26, magma_index_t *rowD26, magma_index_t *colD26, magmaDoubleComplex *valR26, magma_index_t *rowR26, magma_index_t *colR26, magmaDoubleComplex *valD27, magma_index_t *rowD27, magma_index_t *colD27, magmaDoubleComplex *valR27, magma_index_t *rowR27, magma_index_t *colR27, magmaDoubleComplex *valD28, magma_index_t *rowD28, magma_index_t *colD28, magmaDoubleComplex *valR28, magma_index_t *rowR28, magma_index_t *colR28, magmaDoubleComplex *valD29, magma_index_t *rowD29, magma_index_t *colD29, magmaDoubleComplex *valR29, magma_index_t *rowR29, magma_index_t *colR29, magmaDoubleComplex *valD30, magma_index_t *rowD30, magma_index_t *colD30, magmaDoubleComplex *valR30, magma_index_t *rowR30, magma_index_t *colR30, magmaDoubleComplex *valD31, magma_index_t *rowD31, magma_index_t *colD31, magmaDoubleComplex *valR31, magma_index_t *rowR31, magma_index_t *colR31, magmaDoubleComplex *valD32, magma_index_t *rowD32, magma_index_t *colD32, magmaDoubleComplex *valR32, magma_index_t *rowR32, magma_index_t *colR32, magmaDoubleComplex *valD33, magma_index_t *rowD33, magma_index_t *colD33, magmaDoubleComplex *valR33, magma_index_t *rowR33, magma_index_t *colR33, magmaDoubleComplex *valD34, magma_index_t *rowD34, magma_index_t *colD34, magmaDoubleComplex *valR34, magma_index_t *rowR34, magma_index_t *colR34, magmaDoubleComplex *valD35, magma_index_t *rowD35, magma_index_t *colD35, magmaDoubleComplex *valR35, magma_index_t *rowR35, magma_index_t *colR35, magmaDoubleComplex *valD36, magma_index_t *rowD36, magma_index_t *colD36, magmaDoubleComplex *valR36, magma_index_t *rowR36, magma_index_t *colR36, magmaDoubleComplex *valD37, magma_index_t *rowD37, magma_index_t *colD37, magmaDoubleComplex *valR37, magma_index_t *rowR37, magma_index_t *colR37, magmaDoubleComplex *valD38, magma_index_t *rowD38, magma_index_t *colD38, magmaDoubleComplex *valR38, magma_index_t *rowR38, magma_index_t *colR38, magmaDoubleComplex *valD39, magma_index_t *rowD39, magma_index_t *colD39, magmaDoubleComplex *valR39, magma_index_t *rowR39, magma_index_t *colR39, magmaDoubleComplex *valD40, magma_index_t *rowD40, magma_index_t *colD40, magmaDoubleComplex *valR40, magma_index_t *rowR40, magma_index_t *colR40, magmaDoubleComplex *valD41, magma_index_t *rowD41, magma_index_t *colD41, magmaDoubleComplex *valR41, magma_index_t *rowR41, magma_index_t *colR41, magmaDoubleComplex *valD42, magma_index_t *rowD42, magma_index_t *colD42, magmaDoubleComplex *valR42, magma_index_t *rowR42, magma_index_t *colR42, magmaDoubleComplex *valD43, magma_index_t *rowD43, magma_index_t *colD43, magmaDoubleComplex *valR43, magma_index_t *rowR43, magma_index_t *colR43, magmaDoubleComplex *valD44, magma_index_t *rowD44, magma_index_t *colD44, magmaDoubleComplex *valR44, magma_index_t *rowR44, magma_index_t *colR44, magmaDoubleComplex *valD45, magma_index_t *rowD45, magma_index_t *colD45, magmaDoubleComplex *valR45, magma_index_t *rowR45, magma_index_t *colR45, magmaDoubleComplex *valD46, magma_index_t *rowD46, magma_index_t *colD46, magmaDoubleComplex *valR46, magma_index_t *rowR46, magma_index_t *colR46, magmaDoubleComplex *valD47, magma_index_t *rowD47, magma_index_t *colD47, magmaDoubleComplex *valR47, magma_index_t *rowR47, magma_index_t *colR47, magmaDoubleComplex *valD48, magma_index_t *rowD48, magma_index_t *colD48, magmaDoubleComplex *valR48, magma_index_t *rowR48, magma_index_t *colR48, magmaDoubleComplex *valD49, magma_index_t *rowD49, magma_index_t *colD49, magmaDoubleComplex *valR49, magma_index_t *rowR49, 
magma_index_t *colR49, magmaDoubleComplex *valD50, magma_index_t *rowD50, magma_index_t *colD50, magmaDoubleComplex *valR50, magma_index_t *rowR50, magma_index_t *colR50, magmaDoubleComplex *valD51, magma_index_t *rowD51, magma_index_t *colD51, magmaDoubleComplex *valR51, magma_index_t *rowR51, magma_index_t *colR51, magmaDoubleComplex *valD52, magma_index_t *rowD52, magma_index_t *colD52, magmaDoubleComplex *valR52, magma_index_t *rowR52, magma_index_t *colR52, magmaDoubleComplex *valD53, magma_index_t *rowD53, magma_index_t *colD53, magmaDoubleComplex *valR53, magma_index_t *rowR53, magma_index_t *colR53, magmaDoubleComplex *valD54, magma_index_t *rowD54, magma_index_t *colD54, magmaDoubleComplex *valR54, magma_index_t *rowR54, magma_index_t *colR54, magmaDoubleComplex *valD55, magma_index_t *rowD55, magma_index_t *colD55, magmaDoubleComplex *valR55, magma_index_t *rowR55, magma_index_t *colR55, magmaDoubleComplex *valD56, magma_index_t *rowD56, magma_index_t *colD56, magmaDoubleComplex *valR56, magma_index_t *rowR56, magma_index_t *colR56, magmaDoubleComplex *valD57, magma_index_t *rowD57, magma_index_t *colD57, magmaDoubleComplex *valR57, magma_index_t *rowR57, magma_index_t *colR57, magmaDoubleComplex *valD58, magma_index_t *rowD58, magma_index_t *colD58, magmaDoubleComplex *valR58, magma_index_t *rowR58, magma_index_t *colR58, magmaDoubleComplex *valD59, magma_index_t *rowD59, magma_index_t *colD59, magmaDoubleComplex *valR59, magma_index_t *rowR59, magma_index_t *colR59, magmaDoubleComplex *valD60, magma_index_t *rowD60, magma_index_t *colD60, magmaDoubleComplex *valR60, magma_index_t *rowR60, magma_index_t *colR60, magmaDoubleComplex *valD61, magma_index_t *rowD61, magma_index_t *colD61, magmaDoubleComplex *valR61, magma_index_t *rowR61, magma_index_t *colR61, magmaDoubleComplex *valD62, magma_index_t *rowD62, magma_index_t *colD62, magmaDoubleComplex *valR62, magma_index_t *rowR62, magma_index_t *colR62, magmaDoubleComplex *valD63, magma_index_t *rowD63, magma_index_t *colD63, magmaDoubleComplex *valR63, magma_index_t *rowR63, magma_index_t *colR63, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; magmaDoubleComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR63; valD = valD63; colR = colR63; rowR = rowR63; colD = colD63; rowD = rowD63; } else if ( blockIdx.x%matrices==1 ) { valR = valR62; valD = valD62; colR = colR62; rowR = rowR62; colD = colD62; rowD = rowD62; } else if ( blockIdx.x%matrices==2 ) { valR = valR61; valD = valD61; colR = colR61; rowR = rowR61; colD = colD61; rowD = rowD61; } else if ( blockIdx.x%matrices==3 ) { valR = valR60; valD = valD60; colR = colR60; rowR = rowR60; colD = colD60; rowD = rowD60; } else if ( blockIdx.x%matrices==4 ) { valR = valR59; valD = valD59; colR = colR59; rowR = rowR59; colD = colD59; rowD = rowD59; } else if ( blockIdx.x%matrices==5 ) { valR = valR58; valD = valD58; colR = colR58; rowR = rowR58; colD = colD58; rowD = rowD58; } else if ( blockIdx.x%matrices==6 ) { valR = valR57; valD = valD57; colR = colR57; rowR = rowR57; colD = colD57; rowD = rowD57; } else if ( blockIdx.x%matrices==7 ) { valR = valR56; valD = valD56; colR = colR56; rowR = rowR56; colD = colD56; rowD = rowD56; } else if ( blockIdx.x%matrices==8 ) { 
valR = valR55; valD = valD55; colR = colR55; rowR = rowR55; colD = colD55; rowD = rowD55; } else if ( blockIdx.x%matrices==9 ) { valR = valR54; valD = valD54; colR = colR54; rowR = rowR54; colD = colD54; rowD = rowD54; } else if ( blockIdx.x%matrices==10 ) { valR = valR53; valD = valD53; colR = colR53; rowR = rowR53; colD = colD53; rowD = rowD53; } else if ( blockIdx.x%matrices==11 ) { valR = valR52; valD = valD52; colR = colR52; rowR = rowR52; colD = colD52; rowD = rowD52; } else if ( blockIdx.x%matrices==12 ) { valR = valR51; valD = valD51; colR = colR51; rowR = rowR51; colD = colD51; rowD = rowD51; } else if ( blockIdx.x%matrices==13 ) { valR = valR50; valD = valD50; colR = colR50; rowR = rowR50; colD = colD50; rowD = rowD50; } else if ( blockIdx.x%matrices==14 ) { valR = valR49; valD = valD49; colR = colR49; rowR = rowR49; colD = colD49; rowD = rowD49; } else if ( blockIdx.x%matrices==15 ) { valR = valR48; valD = valD48; colR = colR48; rowR = rowR48; colD = colD48; rowD = rowD48; } else if ( blockIdx.x%matrices==16 ) { valR = valR47; valD = valD47; colR = colR47; rowR = rowR47; colD = colD47; rowD = rowD47; } else if ( blockIdx.x%matrices==17 ) { valR = valR46; valD = valD46; colR = colR46; rowR = rowR46; colD = colD46; rowD = rowD46; } else if ( blockIdx.x%matrices==18 ) { valR = valR45; valD = valD45; colR = colR45; rowR = rowR45; colD = colD45; rowD = rowD45; } else if ( blockIdx.x%matrices==19 ) { valR = valR44; valD = valD44; colR = colR44; rowR = rowR44; colD = colD44; rowD = rowD44; } else if ( blockIdx.x%matrices==20 ) { valR = valR43; valD = valD43; colR = colR43; rowR = rowR43; colD = colD43; rowD = rowD43; } else if ( blockIdx.x%matrices==21 ) { valR = valR42; valD = valD42; colR = colR42; rowR = rowR42; colD = colD42; rowD = rowD42; } else if ( blockIdx.x%matrices==22 ) { valR = valR41; valD = valD41; colR = colR41; rowR = rowR41; colD = colD41; rowD = rowD41; } else if ( blockIdx.x%matrices==23 ) { valR = valR40; valD = valD40; colR = colR40; rowR = rowR40; colD = colD40; rowD = rowD40; } else if ( blockIdx.x%matrices==24 ) { valR = valR39; valD = valD39; colR = colR39; rowR = rowR39; colD = colD39; rowD = rowD39; } else if ( blockIdx.x%matrices==25 ) { valR = valR38; valD = valD38; colR = colR38; rowR = rowR38; colD = colD38; rowD = rowD38; } else if ( blockIdx.x%matrices==26 ) { valR = valR37; valD = valD37; colR = colR37; rowR = rowR37; colD = colD37; rowD = rowD37; } else if ( blockIdx.x%matrices==27 ) { valR = valR36; valD = valD36; colR = colR36; rowR = rowR36; colD = colD36; rowD = rowD36; } else if ( blockIdx.x%matrices==28 ) { valR = valR35; valD = valD35; colR = colR35; rowR = rowR35; colD = colD35; rowD = rowD35; } else if ( blockIdx.x%matrices==29 ) { valR = valR34; valD = valD34; colR = colR34; rowR = rowR34; colD = colD34; rowD = rowD34; } else if ( blockIdx.x%matrices==30 ) { valR = valR33; valD = valD33; colR = colR33; rowR = rowR33; colD = colD33; rowD = rowD33; } else if ( blockIdx.x%matrices==31 ) { valR = valR32; valD = valD32; colR = colR32; rowR = rowR32; colD = colD32; rowD = rowD32; } else if ( blockIdx.x%matrices==32 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==33 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==34 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==35 ) { valR = valR28; valD = valD28; colR = colR28; 
rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==36 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==37 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==38 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==39 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==40 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==41 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==42 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==43 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==44 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==45 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==46 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==47 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==48 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==49 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==50 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==51 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==52 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==53 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==54 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==55 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==56 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==57 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==58 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==59 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==60 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==61 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==62 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==63 ) { valR = valR0; 
valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration with directed restricted additive Schwarz overlap (top-down) performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. Arguments --------- @param[in] localiters magma_int_t number of local Jacobi-like updates @param[in] matrices magma_int_t number of sub-matrices @param[in] overlap magma_int_t size of the overlap @param[in] D magma_z_matrix* set of matrices with diagonal blocks @param[in] R magma_z_matrix* set of matrices with non-diagonal parts @param[in] b magma_z_matrix RHS @param[in] x magma_z_matrix* iterate/solution @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbajac_csr_overlap( magma_int_t localiters, magma_int_t matrices, magma_int_t overlap, magma_z_matrix *D, magma_z_matrix *R, magma_z_matrix b, magma_z_matrix *x, magma_queue_t queue ) { int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int size = D[0].num_rows; int min_nnz=100; for(int i=0; i<matrices; i++){ min_nnz = min(min_nnz, R[i].nnz); } if ( min_nnz > -1 ){ if ( matrices == 1 ){ int dimgrid1 = magma_ceildiv( size , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_zbajac_csr_o_ls_kernel1), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, b.dval, x->dval ); } else if (matrices == 2) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_zbajac_csr_o_ls_kernel2), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, b.dval, x->dval ); //magma_zbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 4){ int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( 
magma_zbajac_csr_o_ls_kernel4), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, b.dval, x->dval ); //magma_zbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 8) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_zbajac_csr_o_ls_kernel8), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, D[4].dval, D[4].drow, D[4].dcol, R[4].dval, R[4].drow, R[4].dcol, D[5].dval, D[5].drow, D[5].dcol, R[5].dval, R[5].drow, R[5].dcol, D[6].dval, D[6].drow, D[6].dcol, R[6].dval, R[6].drow, R[6].dcol, D[7].dval, D[7].drow, D[7].dcol, R[7].dval, R[7].drow, R[7].dcol, b.dval, x->dval ); //magma_zbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 16) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_zbajac_csr_o_ls_kernel16), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, b.dval, x->dval ); } else if (matrices == 32) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_zbajac_csr_o_ls_kernel32), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, 
D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, b.dval, x->dval ); } else if (matrices == 64) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_zbajac_csr_o_ls_kernel64), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, 
D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, D[32].dval, D[32].drow, D[32].dcol, R[32].dval, R[32].drow, R[32].dcol, D[33].dval, D[33].drow, D[33].dcol, R[33].dval, R[33].drow, R[33].dcol, D[34].dval, D[34].drow, D[34].dcol, R[34].dval, R[34].drow, R[34].dcol, D[35].dval, D[35].drow, D[35].dcol, R[35].dval, R[35].drow, R[35].dcol, D[36].dval, D[36].drow, D[36].dcol, R[36].dval, R[36].drow, R[36].dcol, D[37].dval, D[37].drow, D[37].dcol, R[37].dval, R[37].drow, R[37].dcol, D[38].dval, D[38].drow, D[38].dcol, R[38].dval, R[38].drow, R[38].dcol, D[39].dval, D[39].drow, D[39].dcol, R[39].dval, R[39].drow, R[39].dcol, D[40].dval, D[40].drow, D[40].dcol, R[40].dval, R[40].drow, R[40].dcol, D[41].dval, D[41].drow, D[41].dcol, R[41].dval, R[41].drow, R[41].dcol, D[42].dval, D[42].drow, D[42].dcol, R[42].dval, R[42].drow, R[42].dcol, D[43].dval, D[43].drow, D[43].dcol, R[43].dval, R[43].drow, R[43].dcol, D[44].dval, D[44].drow, D[44].dcol, R[44].dval, R[44].drow, R[44].dcol, D[45].dval, D[45].drow, D[45].dcol, R[45].dval, R[45].drow, R[45].dcol, D[46].dval, D[46].drow, D[46].dcol, R[46].dval, R[46].drow, R[46].dcol, D[47].dval, D[47].drow, D[47].dcol, R[47].dval, R[47].drow, R[47].dcol, D[48].dval, D[48].drow, D[48].dcol, R[48].dval, R[48].drow, R[48].dcol, D[49].dval, D[49].drow, D[49].dcol, R[49].dval, R[49].drow, R[49].dcol, D[50].dval, D[50].drow, D[50].dcol, R[50].dval, R[50].drow, R[50].dcol, D[51].dval, D[51].drow, D[51].dcol, R[51].dval, R[51].drow, R[51].dcol, D[52].dval, D[52].drow, D[52].dcol, R[52].dval, R[52].drow, R[52].dcol, D[53].dval, D[53].drow, D[53].dcol, R[53].dval, R[53].drow, R[53].dcol, D[54].dval, D[54].drow, D[54].dcol, R[54].dval, R[54].drow, R[54].dcol, D[55].dval, D[55].drow, D[55].dcol, R[55].dval, R[55].drow, R[55].dcol, D[56].dval, D[56].drow, D[56].dcol, R[56].dval, R[56].drow, R[56].dcol, D[57].dval, D[57].drow, D[57].dcol, R[57].dval, R[57].drow, R[57].dcol, D[58].dval, D[58].drow, D[58].dcol, R[58].dval, R[58].drow, R[58].dcol, D[59].dval, D[59].drow, D[59].dcol, R[59].dval, R[59].drow, R[59].dcol, D[60].dval, D[60].drow, D[60].dcol, R[60].dval, R[60].drow, R[60].dcol, D[61].dval, D[61].drow, D[61].dcol, 
R[61].dval, R[61].drow, R[61].dcol,
            D[62].dval, D[62].drow, D[62].dcol, R[62].dval, R[62].drow, R[62].dcol,
            D[63].dval, D[63].drow, D[63].dcol, R[63].dval, R[63].drow, R[63].dcol,
            b.dval, x->dval );
            //magma_zbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
            //    ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
        }
        else {
            printf("error: invalid matrix count.\n");
        }
    }
    else {
        printf("error: all elements in diagonal block.\n");
    }

    return MAGMA_SUCCESS;
}
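/* Usage sketch (not part of the original MAGMA file): a minimal, hypothetical
   call site for magma_zbajac_csr_overlap() as documented above. It assumes the
   caller has already split the system matrix into per-subdomain diagonal blocks
   D[i] and off-diagonal remainders R[i] (both CSR, device-resident) and that b
   and x live on the device; the splitting and allocation calls are omitted.
   The wrapper name and the `sweeps` parameter are illustrative only. */
extern "C" magma_int_t
example_zbajac_overlap_sweeps(
    magma_int_t sweeps,        // number of outer block-Jacobi sweeps (hypothetical)
    magma_int_t localiters,    // local Jacobi-like updates per block
    magma_int_t matrices,      // number of sub-matrices (1, 2, 4, 8, 16, 32 or 64)
    magma_int_t overlap,       // size of the overlap
    magma_z_matrix *D,         // diagonal blocks, one CSR matrix per subdomain
    magma_z_matrix *R,         // non-diagonal parts, one CSR matrix per subdomain
    magma_z_matrix b,          // right-hand side (device)
    magma_z_matrix *x,         // iterate/solution (device), updated in place
    magma_queue_t queue )
{
    magma_int_t info = MAGMA_SUCCESS;
    for (magma_int_t s = 0; s < sweeps && info == MAGMA_SUCCESS; ++s) {
        // one overlapping block-asynchronous Jacobi sweep using the kernels above
        info = magma_zbajac_csr_overlap( localiters, matrices, overlap,
                                         D, R, b, x, queue );
    }
    return info;
}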
cd8058b5d17b026e208f6efca33ec8c0f1b77be0.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define PRECISION_z #define BLOCKSIZE 256 __global__ void magma_zk_testLocking(unsigned int* locks, int n) { int id = threadIdx.x % n; bool leaveLoop = false; while (!leaveLoop) { if (atomicExch(&(locks[id]), 1u) == 0u) { //critical section leaveLoop = true; atomicExch(&(locks[id]),0u); } } } /* __global__ void magma_zbajac_csr_o_ls_kernel(int localiters, int n, int matrices, int overlap, magma_z_matrix *D, magma_z_matrix *R, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { // int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; // int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; magmaDoubleComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; //valR = R[ (1+blockIdx.x-1)%matrices ].dval; //colR = R[ (1+blockIdx.x-1)%matrices ].dcol; //rowR = R[ (1+blockIdx.x-1)%matrices ].drow; //valD = D[ (1+blockIdx.x-1)%matrices ].dval; //colD = D[ (1+blockIdx.x-1)%matrices ].dcol; //rowD = D[ (1+blockIdx.x-1)%matrices ].drow; if (blockIdx.x%2 == 1) { valR = R[0].dval; valD = D[0].dval; colR = R[0].dcol; rowR = R[0].drow; colD = D[0].dcol; rowD = D[0].drow; } else { valR = R[1].dval; valD = D[1].dval; colR = R[1].dcol; rowR = R[1].drow; colD = D[1].dcol; rowD = D[1].drow; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; printf("bdx:%d idx:%d start:%d end:%d\n", blockIdx.x, threadIdx.x, start, end); #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for (i = start; i < end; i++) v += valR[i] * x[ colR[i] ]; start = rowD[index]; end = rowD[index+1]; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations local_x[threadIdx.x] = x[index]; //+ ( v - tmp); // / (valD[start]); __syncthreads(); #pragma unroll for (j = 0; j < localiters-1; j++) { tmp = zero; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if (threadIdx.x > overlap) { // RAS x[index] = local_x[threadIdx.x]; } } } */ __global__ void magma_zbajac_csr_o_ls_kernel1(int localiters, int n, int matrices, int overlap, magmaDoubleComplex * valD, magma_index_t * rowD, magma_index_t * colD, magmaDoubleComplex * valR, magma_index_t * rowR, magma_index_t * colR, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { int inddiag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; //bool leaveLoop = false; if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v 
= bl - v; // add more local iterations __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_zbajac_csr_o_ls_kernel2(int localiters, int n, int matrices, int overlap, magmaDoubleComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaDoubleComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaDoubleComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaDoubleComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; //bool leaveLoop = false; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; magmaDoubleComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if (blockIdx.x%matrices == 0) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if (blockIdx.x%matrices == 1) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_zbajac_csr_o_ls_kernel4(int localiters, int n, int matrices, int overlap, magmaDoubleComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaDoubleComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaDoubleComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaDoubleComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, magmaDoubleComplex * valD2, magma_index_t * rowD2, magma_index_t * colD2, magmaDoubleComplex * valR2, magma_index_t * rowR2, magma_index_t * colR2, magmaDoubleComplex * valD3, magma_index_t * rowD3, magma_index_t * colD3, magmaDoubleComplex * valR3, magma_index_t * rowR3, magma_index_t * colR3, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; //bool leaveLoop = false; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; magmaDoubleComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = 
valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==1 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==2 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==3 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_zbajac_csr_o_ls_kernel8(int localiters, int n, int matrices, int overlap, magmaDoubleComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaDoubleComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaDoubleComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaDoubleComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, magmaDoubleComplex * valD2, magma_index_t * rowD2, magma_index_t * colD2, magmaDoubleComplex * valR2, magma_index_t * rowR2, magma_index_t * colR2, magmaDoubleComplex * valD3, magma_index_t * rowD3, magma_index_t * colD3, magmaDoubleComplex * valR3, magma_index_t * rowR3, magma_index_t * colR3, magmaDoubleComplex * valD4, magma_index_t * rowD4, magma_index_t * colD4, magmaDoubleComplex * valR4, magma_index_t * rowR4, magma_index_t * colR4, magmaDoubleComplex * valD5, magma_index_t * rowD5, magma_index_t * colD5, magmaDoubleComplex * valR5, magma_index_t * rowR5, magma_index_t * colR5, magmaDoubleComplex * valD6, magma_index_t * rowD6, magma_index_t * colD6, magmaDoubleComplex * valR6, magma_index_t * rowR6, magma_index_t * colR6, magmaDoubleComplex * valD7, magma_index_t * rowD7, magma_index_t * colD7, magmaDoubleComplex * valR7, magma_index_t * rowR7, magma_index_t * colR7, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; magmaDoubleComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if( blockIdx.x%matrices==0 ){ valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }else if ( blockIdx.x%matrices==1 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }else if ( blockIdx.x%matrices==2 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }else if ( blockIdx.x%matrices==3 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; }else if ( blockIdx.x%matrices==4 ) { valR = valR3; valD = 
valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==5 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==6 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==7 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_zbajac_csr_o_ls_kernel16(int localiters, int n, int matrices, int overlap, magmaDoubleComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaDoubleComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaDoubleComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaDoubleComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaDoubleComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaDoubleComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaDoubleComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaDoubleComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaDoubleComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaDoubleComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaDoubleComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaDoubleComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaDoubleComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaDoubleComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaDoubleComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaDoubleComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaDoubleComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaDoubleComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaDoubleComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaDoubleComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaDoubleComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaDoubleComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaDoubleComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaDoubleComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaDoubleComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaDoubleComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaDoubleComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaDoubleComplex *valR13, magma_index_t *rowR13, magma_index_t *colR13, magmaDoubleComplex *valD14, magma_index_t *rowD14, 
magma_index_t *colD14, magmaDoubleComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaDoubleComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaDoubleComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; magmaDoubleComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==1 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==2 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==3 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==4 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==5 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==6 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==7 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==8 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==9 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==10 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==11 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==12 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==13 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==14 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==15 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void 
magma_zbajac_csr_o_ls_kernel32(int localiters, int n, int matrices, int overlap, magmaDoubleComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaDoubleComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaDoubleComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaDoubleComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaDoubleComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaDoubleComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaDoubleComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaDoubleComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaDoubleComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaDoubleComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaDoubleComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaDoubleComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaDoubleComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaDoubleComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaDoubleComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaDoubleComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaDoubleComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaDoubleComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaDoubleComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaDoubleComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaDoubleComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaDoubleComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaDoubleComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaDoubleComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaDoubleComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaDoubleComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaDoubleComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaDoubleComplex *valR13, magma_index_t *rowR13, magma_index_t *colR13, magmaDoubleComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, magmaDoubleComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaDoubleComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaDoubleComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, magmaDoubleComplex *valD16, magma_index_t *rowD16, magma_index_t *colD16, magmaDoubleComplex *valR16, magma_index_t *rowR16, magma_index_t *colR16, magmaDoubleComplex *valD17, magma_index_t *rowD17, magma_index_t *colD17, magmaDoubleComplex *valR17, magma_index_t *rowR17, magma_index_t *colR17, magmaDoubleComplex *valD18, magma_index_t *rowD18, magma_index_t *colD18, magmaDoubleComplex *valR18, magma_index_t *rowR18, magma_index_t *colR18, magmaDoubleComplex *valD19, magma_index_t *rowD19, magma_index_t *colD19, magmaDoubleComplex *valR19, magma_index_t *rowR19, magma_index_t *colR19, magmaDoubleComplex *valD20, magma_index_t *rowD20, magma_index_t *colD20, magmaDoubleComplex *valR20, magma_index_t *rowR20, magma_index_t *colR20, magmaDoubleComplex *valD21, magma_index_t *rowD21, magma_index_t *colD21, magmaDoubleComplex *valR21, magma_index_t *rowR21, magma_index_t *colR21, magmaDoubleComplex *valD22, magma_index_t *rowD22, magma_index_t *colD22, magmaDoubleComplex *valR22, magma_index_t *rowR22, magma_index_t *colR22, magmaDoubleComplex *valD23, magma_index_t *rowD23, magma_index_t 
*colD23, magmaDoubleComplex *valR23, magma_index_t *rowR23, magma_index_t *colR23, magmaDoubleComplex *valD24, magma_index_t *rowD24, magma_index_t *colD24, magmaDoubleComplex *valR24, magma_index_t *rowR24, magma_index_t *colR24, magmaDoubleComplex *valD25, magma_index_t *rowD25, magma_index_t *colD25, magmaDoubleComplex *valR25, magma_index_t *rowR25, magma_index_t *colR25, magmaDoubleComplex *valD26, magma_index_t *rowD26, magma_index_t *colD26, magmaDoubleComplex *valR26, magma_index_t *rowR26, magma_index_t *colR26, magmaDoubleComplex *valD27, magma_index_t *rowD27, magma_index_t *colD27, magmaDoubleComplex *valR27, magma_index_t *rowR27, magma_index_t *colR27, magmaDoubleComplex *valD28, magma_index_t *rowD28, magma_index_t *colD28, magmaDoubleComplex *valR28, magma_index_t *rowR28, magma_index_t *colR28, magmaDoubleComplex *valD29, magma_index_t *rowD29, magma_index_t *colD29, magmaDoubleComplex *valR29, magma_index_t *rowR29, magma_index_t *colR29, magmaDoubleComplex *valD30, magma_index_t *rowD30, magma_index_t *colD30, magmaDoubleComplex *valR30, magma_index_t *rowR30, magma_index_t *colR30, magmaDoubleComplex *valD31, magma_index_t *rowD31, magma_index_t *colD31, magmaDoubleComplex *valR31, magma_index_t *rowR31, magma_index_t *colR31, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; magmaDoubleComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==1 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==2 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==3 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==4 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==5 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==6 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==7 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==8 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==9 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==10 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==11 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==12 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==13 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==14 ) { valR = valR17; valD = valD17; colR = colR17; rowR = 
rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==15 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==16 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==17 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==18 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==19 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==20 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==21 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==22 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==23 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==24 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==25 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==26 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==27 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==28 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==29 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==30 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==31 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_zbajac_csr_o_ls_kernel64(int localiters, int n, int matrices, int overlap, magmaDoubleComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaDoubleComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaDoubleComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaDoubleComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaDoubleComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , 
magmaDoubleComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaDoubleComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaDoubleComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaDoubleComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaDoubleComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaDoubleComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaDoubleComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaDoubleComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaDoubleComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaDoubleComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaDoubleComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaDoubleComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaDoubleComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaDoubleComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaDoubleComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaDoubleComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaDoubleComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaDoubleComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaDoubleComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaDoubleComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaDoubleComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaDoubleComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaDoubleComplex *valR13, magma_index_t *rowR13, magma_index_t *colR13, magmaDoubleComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, magmaDoubleComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaDoubleComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaDoubleComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, magmaDoubleComplex *valD16, magma_index_t *rowD16, magma_index_t *colD16, magmaDoubleComplex *valR16, magma_index_t *rowR16, magma_index_t *colR16, magmaDoubleComplex *valD17, magma_index_t *rowD17, magma_index_t *colD17, magmaDoubleComplex *valR17, magma_index_t *rowR17, magma_index_t *colR17, magmaDoubleComplex *valD18, magma_index_t *rowD18, magma_index_t *colD18, magmaDoubleComplex *valR18, magma_index_t *rowR18, magma_index_t *colR18, magmaDoubleComplex *valD19, magma_index_t *rowD19, magma_index_t *colD19, magmaDoubleComplex *valR19, magma_index_t *rowR19, magma_index_t *colR19, magmaDoubleComplex *valD20, magma_index_t *rowD20, magma_index_t *colD20, magmaDoubleComplex *valR20, magma_index_t *rowR20, magma_index_t *colR20, magmaDoubleComplex *valD21, magma_index_t *rowD21, magma_index_t *colD21, magmaDoubleComplex *valR21, magma_index_t *rowR21, magma_index_t *colR21, magmaDoubleComplex *valD22, magma_index_t *rowD22, magma_index_t *colD22, magmaDoubleComplex *valR22, magma_index_t *rowR22, magma_index_t *colR22, magmaDoubleComplex *valD23, magma_index_t *rowD23, magma_index_t *colD23, magmaDoubleComplex *valR23, magma_index_t *rowR23, magma_index_t *colR23, magmaDoubleComplex *valD24, magma_index_t *rowD24, magma_index_t *colD24, magmaDoubleComplex *valR24, magma_index_t *rowR24, magma_index_t *colR24, magmaDoubleComplex *valD25, magma_index_t *rowD25, magma_index_t *colD25, magmaDoubleComplex *valR25, magma_index_t *rowR25, magma_index_t *colR25, magmaDoubleComplex *valD26, magma_index_t *rowD26, magma_index_t *colD26, 
magmaDoubleComplex *valR26, magma_index_t *rowR26, magma_index_t *colR26, magmaDoubleComplex *valD27, magma_index_t *rowD27, magma_index_t *colD27, magmaDoubleComplex *valR27, magma_index_t *rowR27, magma_index_t *colR27, magmaDoubleComplex *valD28, magma_index_t *rowD28, magma_index_t *colD28, magmaDoubleComplex *valR28, magma_index_t *rowR28, magma_index_t *colR28, magmaDoubleComplex *valD29, magma_index_t *rowD29, magma_index_t *colD29, magmaDoubleComplex *valR29, magma_index_t *rowR29, magma_index_t *colR29, magmaDoubleComplex *valD30, magma_index_t *rowD30, magma_index_t *colD30, magmaDoubleComplex *valR30, magma_index_t *rowR30, magma_index_t *colR30, magmaDoubleComplex *valD31, magma_index_t *rowD31, magma_index_t *colD31, magmaDoubleComplex *valR31, magma_index_t *rowR31, magma_index_t *colR31, magmaDoubleComplex *valD32, magma_index_t *rowD32, magma_index_t *colD32, magmaDoubleComplex *valR32, magma_index_t *rowR32, magma_index_t *colR32, magmaDoubleComplex *valD33, magma_index_t *rowD33, magma_index_t *colD33, magmaDoubleComplex *valR33, magma_index_t *rowR33, magma_index_t *colR33, magmaDoubleComplex *valD34, magma_index_t *rowD34, magma_index_t *colD34, magmaDoubleComplex *valR34, magma_index_t *rowR34, magma_index_t *colR34, magmaDoubleComplex *valD35, magma_index_t *rowD35, magma_index_t *colD35, magmaDoubleComplex *valR35, magma_index_t *rowR35, magma_index_t *colR35, magmaDoubleComplex *valD36, magma_index_t *rowD36, magma_index_t *colD36, magmaDoubleComplex *valR36, magma_index_t *rowR36, magma_index_t *colR36, magmaDoubleComplex *valD37, magma_index_t *rowD37, magma_index_t *colD37, magmaDoubleComplex *valR37, magma_index_t *rowR37, magma_index_t *colR37, magmaDoubleComplex *valD38, magma_index_t *rowD38, magma_index_t *colD38, magmaDoubleComplex *valR38, magma_index_t *rowR38, magma_index_t *colR38, magmaDoubleComplex *valD39, magma_index_t *rowD39, magma_index_t *colD39, magmaDoubleComplex *valR39, magma_index_t *rowR39, magma_index_t *colR39, magmaDoubleComplex *valD40, magma_index_t *rowD40, magma_index_t *colD40, magmaDoubleComplex *valR40, magma_index_t *rowR40, magma_index_t *colR40, magmaDoubleComplex *valD41, magma_index_t *rowD41, magma_index_t *colD41, magmaDoubleComplex *valR41, magma_index_t *rowR41, magma_index_t *colR41, magmaDoubleComplex *valD42, magma_index_t *rowD42, magma_index_t *colD42, magmaDoubleComplex *valR42, magma_index_t *rowR42, magma_index_t *colR42, magmaDoubleComplex *valD43, magma_index_t *rowD43, magma_index_t *colD43, magmaDoubleComplex *valR43, magma_index_t *rowR43, magma_index_t *colR43, magmaDoubleComplex *valD44, magma_index_t *rowD44, magma_index_t *colD44, magmaDoubleComplex *valR44, magma_index_t *rowR44, magma_index_t *colR44, magmaDoubleComplex *valD45, magma_index_t *rowD45, magma_index_t *colD45, magmaDoubleComplex *valR45, magma_index_t *rowR45, magma_index_t *colR45, magmaDoubleComplex *valD46, magma_index_t *rowD46, magma_index_t *colD46, magmaDoubleComplex *valR46, magma_index_t *rowR46, magma_index_t *colR46, magmaDoubleComplex *valD47, magma_index_t *rowD47, magma_index_t *colD47, magmaDoubleComplex *valR47, magma_index_t *rowR47, magma_index_t *colR47, magmaDoubleComplex *valD48, magma_index_t *rowD48, magma_index_t *colD48, magmaDoubleComplex *valR48, magma_index_t *rowR48, magma_index_t *colR48, magmaDoubleComplex *valD49, magma_index_t *rowD49, magma_index_t *colD49, magmaDoubleComplex *valR49, magma_index_t *rowR49, magma_index_t *colR49, magmaDoubleComplex *valD50, magma_index_t *rowD50, magma_index_t *colD50, 
magmaDoubleComplex *valR50, magma_index_t *rowR50, magma_index_t *colR50, magmaDoubleComplex *valD51, magma_index_t *rowD51, magma_index_t *colD51, magmaDoubleComplex *valR51, magma_index_t *rowR51, magma_index_t *colR51, magmaDoubleComplex *valD52, magma_index_t *rowD52, magma_index_t *colD52, magmaDoubleComplex *valR52, magma_index_t *rowR52, magma_index_t *colR52, magmaDoubleComplex *valD53, magma_index_t *rowD53, magma_index_t *colD53, magmaDoubleComplex *valR53, magma_index_t *rowR53, magma_index_t *colR53, magmaDoubleComplex *valD54, magma_index_t *rowD54, magma_index_t *colD54, magmaDoubleComplex *valR54, magma_index_t *rowR54, magma_index_t *colR54, magmaDoubleComplex *valD55, magma_index_t *rowD55, magma_index_t *colD55, magmaDoubleComplex *valR55, magma_index_t *rowR55, magma_index_t *colR55, magmaDoubleComplex *valD56, magma_index_t *rowD56, magma_index_t *colD56, magmaDoubleComplex *valR56, magma_index_t *rowR56, magma_index_t *colR56, magmaDoubleComplex *valD57, magma_index_t *rowD57, magma_index_t *colD57, magmaDoubleComplex *valR57, magma_index_t *rowR57, magma_index_t *colR57, magmaDoubleComplex *valD58, magma_index_t *rowD58, magma_index_t *colD58, magmaDoubleComplex *valR58, magma_index_t *rowR58, magma_index_t *colR58, magmaDoubleComplex *valD59, magma_index_t *rowD59, magma_index_t *colD59, magmaDoubleComplex *valR59, magma_index_t *rowR59, magma_index_t *colR59, magmaDoubleComplex *valD60, magma_index_t *rowD60, magma_index_t *colD60, magmaDoubleComplex *valR60, magma_index_t *rowR60, magma_index_t *colR60, magmaDoubleComplex *valD61, magma_index_t *rowD61, magma_index_t *colD61, magmaDoubleComplex *valR61, magma_index_t *rowR61, magma_index_t *colR61, magmaDoubleComplex *valD62, magma_index_t *rowD62, magma_index_t *colD62, magmaDoubleComplex *valR62, magma_index_t *rowR62, magma_index_t *colR62, magmaDoubleComplex *valD63, magma_index_t *rowD63, magma_index_t *colD63, magmaDoubleComplex *valR63, magma_index_t *rowR63, magma_index_t *colR63, const magmaDoubleComplex * __restrict__ b, magmaDoubleComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex bl, tmp = zero, v = zero; magmaDoubleComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR63; valD = valD63; colR = colR63; rowR = rowR63; colD = colD63; rowD = rowD63; } else if ( blockIdx.x%matrices==1 ) { valR = valR62; valD = valD62; colR = colR62; rowR = rowR62; colD = colD62; rowD = rowD62; } else if ( blockIdx.x%matrices==2 ) { valR = valR61; valD = valD61; colR = colR61; rowR = rowR61; colD = colD61; rowD = rowD61; } else if ( blockIdx.x%matrices==3 ) { valR = valR60; valD = valD60; colR = colR60; rowR = rowR60; colD = colD60; rowD = rowD60; } else if ( blockIdx.x%matrices==4 ) { valR = valR59; valD = valD59; colR = colR59; rowR = rowR59; colD = colD59; rowD = rowD59; } else if ( blockIdx.x%matrices==5 ) { valR = valR58; valD = valD58; colR = colR58; rowR = rowR58; colD = colD58; rowD = rowD58; } else if ( blockIdx.x%matrices==6 ) { valR = valR57; valD = valD57; colR = colR57; rowR = rowR57; colD = colD57; rowD = rowD57; } else if ( blockIdx.x%matrices==7 ) { valR = valR56; valD = valD56; colR = colR56; rowR = rowR56; colD = colD56; rowD = rowD56; } else if ( blockIdx.x%matrices==8 ) { valR = valR55; valD = valD55; colR = colR55; rowR = rowR55; colD = colD55; rowD = rowD55; } else if 
( blockIdx.x%matrices==9 ) { valR = valR54; valD = valD54; colR = colR54; rowR = rowR54; colD = colD54; rowD = rowD54; } else if ( blockIdx.x%matrices==10 ) { valR = valR53; valD = valD53; colR = colR53; rowR = rowR53; colD = colD53; rowD = rowD53; } else if ( blockIdx.x%matrices==11 ) { valR = valR52; valD = valD52; colR = colR52; rowR = rowR52; colD = colD52; rowD = rowD52; } else if ( blockIdx.x%matrices==12 ) { valR = valR51; valD = valD51; colR = colR51; rowR = rowR51; colD = colD51; rowD = rowD51; } else if ( blockIdx.x%matrices==13 ) { valR = valR50; valD = valD50; colR = colR50; rowR = rowR50; colD = colD50; rowD = rowD50; } else if ( blockIdx.x%matrices==14 ) { valR = valR49; valD = valD49; colR = colR49; rowR = rowR49; colD = colD49; rowD = rowD49; } else if ( blockIdx.x%matrices==15 ) { valR = valR48; valD = valD48; colR = colR48; rowR = rowR48; colD = colD48; rowD = rowD48; } else if ( blockIdx.x%matrices==16 ) { valR = valR47; valD = valD47; colR = colR47; rowR = rowR47; colD = colD47; rowD = rowD47; } else if ( blockIdx.x%matrices==17 ) { valR = valR46; valD = valD46; colR = colR46; rowR = rowR46; colD = colD46; rowD = rowD46; } else if ( blockIdx.x%matrices==18 ) { valR = valR45; valD = valD45; colR = colR45; rowR = rowR45; colD = colD45; rowD = rowD45; } else if ( blockIdx.x%matrices==19 ) { valR = valR44; valD = valD44; colR = colR44; rowR = rowR44; colD = colD44; rowD = rowD44; } else if ( blockIdx.x%matrices==20 ) { valR = valR43; valD = valD43; colR = colR43; rowR = rowR43; colD = colD43; rowD = rowD43; } else if ( blockIdx.x%matrices==21 ) { valR = valR42; valD = valD42; colR = colR42; rowR = rowR42; colD = colD42; rowD = rowD42; } else if ( blockIdx.x%matrices==22 ) { valR = valR41; valD = valD41; colR = colR41; rowR = rowR41; colD = colD41; rowD = rowD41; } else if ( blockIdx.x%matrices==23 ) { valR = valR40; valD = valD40; colR = colR40; rowR = rowR40; colD = colD40; rowD = rowD40; } else if ( blockIdx.x%matrices==24 ) { valR = valR39; valD = valD39; colR = colR39; rowR = rowR39; colD = colD39; rowD = rowD39; } else if ( blockIdx.x%matrices==25 ) { valR = valR38; valD = valD38; colR = colR38; rowR = rowR38; colD = colD38; rowD = rowD38; } else if ( blockIdx.x%matrices==26 ) { valR = valR37; valD = valD37; colR = colR37; rowR = rowR37; colD = colD37; rowD = rowD37; } else if ( blockIdx.x%matrices==27 ) { valR = valR36; valD = valD36; colR = colR36; rowR = rowR36; colD = colD36; rowD = rowD36; } else if ( blockIdx.x%matrices==28 ) { valR = valR35; valD = valD35; colR = colR35; rowR = rowR35; colD = colD35; rowD = rowD35; } else if ( blockIdx.x%matrices==29 ) { valR = valR34; valD = valD34; colR = colR34; rowR = rowR34; colD = colD34; rowD = rowD34; } else if ( blockIdx.x%matrices==30 ) { valR = valR33; valD = valD33; colR = colR33; rowR = rowR33; colD = colD33; rowD = rowD33; } else if ( blockIdx.x%matrices==31 ) { valR = valR32; valD = valD32; colR = colR32; rowR = rowR32; colD = colD32; rowD = rowD32; } else if ( blockIdx.x%matrices==32 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==33 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==34 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==35 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==36 ) { valR = valR27; 
valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==37 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==38 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==39 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==40 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==41 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==42 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==43 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==44 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==45 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==46 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==47 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==48 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==49 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==50 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==51 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==52 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==53 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==54 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==55 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==56 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==57 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==58 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==59 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==60 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==61 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==62 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==63 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n 
) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaDoubleComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration with directed restricted additive Schwarz overlap (top-down) performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. Arguments --------- @param[in] localiters magma_int_t number of local Jacobi-like updates @param[in] matrices magma_int_t number of sub-matrices @param[in] overlap magma_int_t size of the overlap @param[in] D magma_z_matrix* set of matrices with diagonal blocks @param[in] R magma_z_matrix* set of matrices with non-diagonal parts @param[in] b magma_z_matrix RHS @param[in] x magma_z_matrix* iterate/solution @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbajac_csr_overlap( magma_int_t localiters, magma_int_t matrices, magma_int_t overlap, magma_z_matrix *D, magma_z_matrix *R, magma_z_matrix b, magma_z_matrix *x, magma_queue_t queue ) { int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int size = D[0].num_rows; int min_nnz=100; for(int i=0; i<matrices; i++){ min_nnz = min(min_nnz, R[i].nnz); } if ( min_nnz > -1 ){ if ( matrices == 1 ){ int dimgrid1 = magma_ceildiv( size , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_zbajac_csr_o_ls_kernel1<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, b.dval, x->dval ); } else if (matrices == 2) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_zbajac_csr_o_ls_kernel2<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, b.dval, x->dval ); //magma_zbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 4){ int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_zbajac_csr_o_ls_kernel4<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, 
D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, b.dval, x->dval ); //magma_zbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 8) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_zbajac_csr_o_ls_kernel8<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, D[4].dval, D[4].drow, D[4].dcol, R[4].dval, R[4].drow, R[4].dcol, D[5].dval, D[5].drow, D[5].dcol, R[5].dval, R[5].drow, R[5].dcol, D[6].dval, D[6].drow, D[6].dcol, R[6].dval, R[6].drow, R[6].dcol, D[7].dval, D[7].drow, D[7].dcol, R[7].dval, R[7].drow, R[7].dcol, b.dval, x->dval ); //magma_zbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 16) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_zbajac_csr_o_ls_kernel16<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, b.dval, x->dval ); } else if (matrices == 32) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_zbajac_csr_o_ls_kernel32<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 
4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, b.dval, x->dval ); } else if (matrices == 64) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_zbajac_csr_o_ls_kernel64<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, 
D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, D[32].dval, D[32].drow, D[32].dcol, R[32].dval, R[32].drow, R[32].dcol, D[33].dval, D[33].drow, D[33].dcol, R[33].dval, R[33].drow, R[33].dcol, D[34].dval, D[34].drow, D[34].dcol, R[34].dval, R[34].drow, R[34].dcol, D[35].dval, D[35].drow, D[35].dcol, R[35].dval, R[35].drow, R[35].dcol, D[36].dval, D[36].drow, D[36].dcol, R[36].dval, R[36].drow, R[36].dcol, D[37].dval, D[37].drow, D[37].dcol, R[37].dval, R[37].drow, R[37].dcol, D[38].dval, D[38].drow, D[38].dcol, R[38].dval, R[38].drow, R[38].dcol, D[39].dval, D[39].drow, D[39].dcol, R[39].dval, R[39].drow, R[39].dcol, D[40].dval, D[40].drow, D[40].dcol, R[40].dval, R[40].drow, R[40].dcol, D[41].dval, D[41].drow, D[41].dcol, R[41].dval, R[41].drow, R[41].dcol, D[42].dval, D[42].drow, D[42].dcol, R[42].dval, R[42].drow, R[42].dcol, D[43].dval, D[43].drow, D[43].dcol, R[43].dval, R[43].drow, R[43].dcol, D[44].dval, D[44].drow, D[44].dcol, R[44].dval, R[44].drow, R[44].dcol, D[45].dval, D[45].drow, D[45].dcol, R[45].dval, R[45].drow, R[45].dcol, D[46].dval, D[46].drow, D[46].dcol, R[46].dval, R[46].drow, R[46].dcol, D[47].dval, D[47].drow, D[47].dcol, R[47].dval, R[47].drow, R[47].dcol, D[48].dval, D[48].drow, D[48].dcol, R[48].dval, R[48].drow, R[48].dcol, D[49].dval, D[49].drow, D[49].dcol, R[49].dval, R[49].drow, R[49].dcol, D[50].dval, D[50].drow, D[50].dcol, R[50].dval, R[50].drow, R[50].dcol, D[51].dval, D[51].drow, D[51].dcol, R[51].dval, R[51].drow, R[51].dcol, D[52].dval, D[52].drow, D[52].dcol, R[52].dval, R[52].drow, R[52].dcol, D[53].dval, D[53].drow, D[53].dcol, R[53].dval, R[53].drow, R[53].dcol, D[54].dval, D[54].drow, D[54].dcol, R[54].dval, R[54].drow, R[54].dcol, D[55].dval, D[55].drow, D[55].dcol, R[55].dval, R[55].drow, R[55].dcol, D[56].dval, D[56].drow, D[56].dcol, R[56].dval, R[56].drow, R[56].dcol, D[57].dval, D[57].drow, D[57].dcol, R[57].dval, R[57].drow, R[57].dcol, D[58].dval, D[58].drow, D[58].dcol, R[58].dval, R[58].drow, R[58].dcol, D[59].dval, D[59].drow, D[59].dcol, R[59].dval, R[59].drow, R[59].dcol, D[60].dval, D[60].drow, D[60].dcol, R[60].dval, R[60].drow, R[60].dcol, D[61].dval, D[61].drow, D[61].dcol, R[61].dval, R[61].drow, R[61].dcol, D[62].dval, D[62].drow, D[62].dcol, R[62].dval, R[62].drow, R[62].dcol, D[63].dval, D[63].drow, D[63].dcol, R[63].dval, R[63].drow, R[63].dcol, b.dval, x->dval ); //magma_zbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, 
        //    matrices, overlap, D, R, b.dval, x->dval );
        }
        else {
            printf("error: invalid matrix count.\n");
        }
    }
    else {
        printf("error: all elements in diagonal block.\n");
    }

    return MAGMA_SUCCESS;
}
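/*
    The variants above (kernel1 ... kernel64) all follow the same pattern and differ only
    in how many (D, R) matrix pairs they cycle through per thread block. A minimal sketch
    of that pattern for a single real-valued sub-domain is given below, assuming plain
    double-precision CSR arrays: each block owns blockDim.x rows that start `overlap` rows
    early, computes the fixed contribution b - R*x from the other sub-domains once, runs
    `localiters` Jacobi updates against its diagonal block D in shared memory, and only the
    rows outside the overlap write back (restricted additive Schwarz). The kernel name, the
    extra __syncthreads() inside the local loop, and the assumption that the first stored
    entry of each D row is the diagonal are simplifications introduced here, not taken from
    the MAGMA sources; the __ldg() fast path for b and the per-precision guards are dropped
    because they only affect performance, not the update itself.
*/
#include <cuda_runtime.h>

#define SKETCH_BLOCK 256   // launch with SKETCH_BLOCK threads per block

__global__ void csr_block_jacobi_sweep_sketch(
    int localiters, int n, int overlap,
    const double *valD, const int *rowD, const int *colD,  // diagonal block, global column indices
    const double *valR, const int *rowR, const int *colR,  // couplings to the other sub-domains
    const double *b, double *x)
{
    // each block starts `overlap` rows before its own range, like the kernels above
    int first = blockIdx.x * (blockDim.x - overlap) - overlap;
    int row   = first + threadIdx.x;
    bool active = (row >= 0 && row < n);

    __shared__ double xloc[SKETCH_BLOCK];

    double v = 0.0, diag = 1.0;
    int ds = 0, de = 0;

    if (active) {
        // v = b[row] - (R*x)[row]; this part stays fixed during the local iterations
        v = b[row];
        for (int k = rowR[row]; k < rowR[row + 1]; ++k)
            v -= valR[k] * x[colR[k]];

        ds = rowD[row];
        de = rowD[row + 1];
        diag = valD[ds];   // assumption: the diagonal entry is stored first in each row of D

        // first update, computed from the current global iterate
        double tmp = 0.0;
        for (int k = ds; k < de; ++k)
            tmp += valD[k] * x[colD[k]];
        xloc[threadIdx.x] = x[row] + (v - tmp) / diag;
    } else {
        xloc[threadIdx.x] = 0.0;   // keep shared memory defined for neighbouring rows
    }
    __syncthreads();

    // additional local Jacobi iterations on the shared-memory copy of x
    for (int it = 0; it < localiters - 1; ++it) {
        double tmp = 0.0;
        if (active) {
            // D is assumed block-local, so colD[k] - first stays inside [0, blockDim.x)
            for (int k = ds; k < de; ++k)
                tmp += valD[k] * xloc[colD[k] - first];
        }
        __syncthreads();               // all reads of xloc finish before anyone writes
        if (active)
            xloc[threadIdx.x] += (v - tmp) / diag;
        __syncthreads();
    }

    // restricted additive Schwarz: only the non-overlapping part of the block writes back
    if (active && threadIdx.x >= overlap)
        x[row] = xloc[threadIdx.x];
}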
2dbeaf72f00a7efc593d19bb63ce15ea2ed778ea.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/conv_cudnn_op_cache.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { #if CUDNN_VERSION >= 7100 using Tensor = framework::Tensor; using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; using ScopedFilterDescriptor = platform::ScopedFilterDescriptor; using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor; using ScopedActivationDescriptor = platform::ScopedActivationDescriptor; using DataLayout = platform::DataLayout; using ScopedPoolingDescriptor = platform::ScopedPoolingDescriptor; using PoolingMode = platform::PoolingMode; template <typename T> using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType; template <typename T> using CudnnDataType = platform::CudnnDataType<T>; template <typename T> class CUDNNConvInceptionFusionOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.template device_context<phi::GPUContext>(); auto* input = ctx.Input<Tensor>("Input"); auto filters = ctx.MultiInput<framework::Tensor>("Filter"); auto bias = ctx.MultiInput<framework::Tensor>("Bias"); auto* output = ctx.Output<Tensor>("Output"); auto temp_outs = ctx.MultiOutput<framework::Tensor>("TempOutput"); const std::string pool_type = ctx.Attr<std::string>("pooling_type"); const std::string activation = ctx.Attr<std::string>("activation"); const bool exclusive = ctx.Attr<bool>("exclusive"); int64_t user_workspace_size = static_cast<size_t>(ctx.Attr<int>("workspace_size_MB")); const T* input_data = input->data<T>(); T* output_data = dev_ctx.Alloc<T>(output, output->numel() * sizeof(T)); temp_outs[0]->Resize(input->dims()); T* temp_data = dev_ctx.Alloc<T>(temp_outs[0], temp_outs[0]->numel() * sizeof(T)); DataLayout layout = DataLayout::kNCHW; std::vector<int> in_dim = phi::vectorize<int>(input->dims()); // ------------------- cudnn descriptors --------------------- PoolingMode pooling_mode; if (pool_type == "max") { pooling_mode = PoolingMode::kMaximum; } else { pooling_mode = exclusive ? 
PoolingMode::kAverageExclusive : (PoolingMode::kAverageInclusive); } std::vector<int> k0x0 = {0, 0}; std::vector<int> k1x1 = {1, 1}; std::vector<int> k1x1_2 = {1, 1}; std::vector<int> k3x3 = {3, 3}; ScopedPoolingDescriptor pool_desc; ScopedActivationDescriptor act_desc; ScopedTensorDescriptor out_pool_desc; ScopedTensorDescriptor input_desc; cudnnPoolingDescriptor_t cudnn_pool_desc = pool_desc.descriptor(pooling_mode, k3x3, k1x1, k1x1); cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(layout, phi::vectorize<int>(input->dims())); cudnnTensorDescriptor_t pool_out_desc = out_pool_desc.descriptor<T>(layout, phi::vectorize<int>(input->dims())); cudnnDataType_t cudnn_dtype = CudnnDataType<T>::type; cudnnTensorDescriptor_t* out_desc = new cudnnTensorDescriptor_t[4]; cudnnFilterDescriptor_t* filter_desc = new cudnnFilterDescriptor_t[4]; cudnnTensorDescriptor_t* bias_desc = new cudnnTensorDescriptor_t[4]; cudnnTensorDescriptor_t* in_desc = new cudnnTensorDescriptor_t[4]; cudnnConvolutionDescriptor_t* conv_desc = new cudnnConvolutionDescriptor_t[4]; for (int i = 0; i < 4; ++i) { PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateFilterDescriptor(&filter_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bias_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&in_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&out_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateConvolutionDescriptor(&conv_desc[i])); } std::vector<std::vector<int>> filter_dims; std::vector<std::vector<int>> bias_dims; std::vector<std::vector<int>> in_dims; std::vector<std::vector<int>> out_dims; std::vector<std::vector<int>> in_strides; std::vector<std::vector<int>> out_strides; std::vector<std::vector<int>> bias_strides; cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW; int n = in_dim[0]; int h = in_dim[2]; int w = in_dim[3]; int oc = output->dims()[1]; cudnnDataType_t compute_type = (cudnn_dtype == CUDNN_DATA_DOUBLE) ? 
CUDNN_DATA_DOUBLE : CUDNN_DATA_FLOAT; for (int i = 0; i < 4; ++i) { filter_dims.push_back(phi::vectorize<int>(filters[i]->dims())); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetFilterNdDescriptor( filter_desc[i], cudnn_dtype, format, 4, filter_dims[i].data())); bias_dims.push_back({1, filter_dims[i][0], 1, 1}); bias_strides.push_back({filter_dims[i][0], 1, 1, 1}); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( bias_desc[i], cudnn_dtype, 4, bias_dims[i].data(), bias_strides[i].data())); in_dims.push_back({n, filter_dims[i][1], h, w}); out_dims.push_back({n, filter_dims[i][0], h, w}); in_strides.push_back({filter_dims[i][1] * h * w, h * w, w, 1}); out_strides.push_back({oc * h * w, h * w, w, 1}); if (i < 2) { PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetConvolutionNdDescriptor( conv_desc[i], 2, k0x0.data(), k1x1.data(), k1x1.data(), CUDNN_CROSS_CORRELATION, compute_type)); } else { PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetConvolutionNdDescriptor( conv_desc[i], 2, k1x1.data(), k1x1.data(), k1x1.data(), CUDNN_CROSS_CORRELATION, compute_type)); } PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( conv_desc[i], CUDNN_DEFAULT_MATH)); #if TORCH_HIP_VERSION >= 11000 && CUDNN_VERSION >= 8000 if (!platform::allow_tf32_cudnn) { PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetConvolutionMathType(conv_desc[i], CUDNN_FMA_MATH)); } #endif // TORCH_HIP_VERSION >= 11000 && CUDNN_VERSION >= 8000 } in_dims[2][1] *= 2; in_strides[2][0] = oc * h * w; out_strides[2][0] = filter_dims[2][0] * h * w; // this out is continuous. in_strides[3][0] = filter_dims[2][0] * h * w; PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetConvolutionGroupCount(conv_desc[2], 2)); cudnnConvolutionFwdAlgo_t algo[4]; auto handle = dev_ctx.cudnn_handle(); size_t workspace_size_in_bytes = 0; // final workspace to allocate. 
size_t workspace_size_limit = 0; if (FLAGS_conv_workspace_size_limit > 0 || user_workspace_size > 0) { int64_t max_user_size = ::min(static_cast<int64_t>(FLAGS_conv_workspace_size_limit), user_workspace_size); workspace_size_limit = max_user_size * 1024 * 1024; } for (int i = 0; i < 4; ++i) { PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( in_desc[i], cudnn_dtype, 4, in_dims[i].data(), in_strides[i].data())); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetTensorNdDescriptor(out_desc[i], cudnn_dtype, 4, out_dims[i].data(), out_strides[i].data())); int perf_count; int best_algo_idx = 0; size_t tmp_size = 0; std::unique_ptr<cudnnConvolutionFwdAlgoPerf_t[]> perf_results( new cudnnConvolutionFwdAlgoPerf_t[kNUM_CUDNN_FWD_ALGS]); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionForwardAlgorithm_v7( handle, in_desc[i], filter_desc[i], conv_desc[i], out_desc[i], kNUM_CUDNN_FWD_ALGS, &perf_count, perf_results.get())); algo[i] = (perf_results.get())[best_algo_idx].algo; PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( handle, in_desc[i], filter_desc[i], conv_desc[i], out_desc[i], algo[i], &tmp_size)); workspace_size_in_bytes = ::max(workspace_size_in_bytes, tmp_size); } cudnnActivationDescriptor_t cudnn_act_desc = act_desc.descriptor<T>(activation); int oc0 = filter_dims[0][0]; int oc1 = filter_dims[1][0] - filter_dims[2][1] * 2; int oc3 = filter_dims[3][0]; int oc2 = oc - oc0 - oc1 - oc3; // branch1: pool + 1x1 conv ScalingParamType<T> alpha = 1.0f, beta = 0.0f; PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnPoolingForward(handle, cudnn_pool_desc, &alpha, cudnn_input_desc, input_data, &beta, pool_out_desc, temp_data)); std::vector<const void*> in_datas; in_datas.push_back(static_cast<const void*>(temp_data)); in_datas.push_back(static_cast<const void*>(input_data)); in_datas.push_back( static_cast<const void*>(output_data + (oc0 + oc1) * h * w)); temp_outs[1]->Resize(phi::make_ddim(out_dims[2])); T* temp2_data = dev_ctx.Alloc<T>(temp_outs[1], temp_outs[1]->numel() * sizeof(T)); in_datas.push_back(static_cast<const void*>(temp2_data + oc2 * h * w)); std::vector<void*> out_datas; out_datas.push_back(static_cast<void*>(output_data)); out_datas.push_back(static_cast<void*>(output_data + oc0 * h * w)); out_datas.push_back(static_cast<void*>(temp2_data)); out_datas.push_back( static_cast<void*>(output_data + (oc0 + oc1 + oc2) * h * w)); for (int i = 0; i < 4; ++i) { auto func = [&](void* cudnn_workspace) { PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionBiasActivationForward( handle, &alpha, in_desc[i], in_datas[i], filter_desc[i], static_cast<const void*>(filters[i]->data<T>()), conv_desc[i], algo[i], cudnn_workspace, workspace_size_in_bytes, &beta, out_desc[i], out_datas[i], bias_desc[i], static_cast<const void*>(bias[i]->data<T>()), cudnn_act_desc, out_desc[i], out_datas[i])); }; auto workspace_handle = dev_ctx.cudnn_workspace_handle(); workspace_handle.RunFunc(func, workspace_size_in_bytes); } cudnnTensorDescriptor_t x_desc; cudnnTensorDescriptor_t y_desc; PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&x_desc)); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&y_desc)); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( x_desc, cudnn_dtype, 4, out_dims[3].data(), out_strides[2].data())); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( y_desc, cudnn_dtype, 4, out_dims[3].data(), 
out_strides[3].data())); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnTransformTensor( handle, CudnnDataType<T>::kOne(), x_desc, static_cast<const void*>(out_datas[2]), CudnnDataType<T>::kZero(), y_desc, static_cast<void*>(output_data + (oc0 + oc1) * h * w))); for (int i = 0; i < 4; ++i) { PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(in_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(out_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyFilterDescriptor(filter_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bias_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyConvolutionDescriptor(conv_desc[i])); } PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(x_desc)); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(y_desc)); } }; #endif } // namespace operators } // namespace paddle #if CUDNN_VERSION >= 7100 namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(conv2d_inception_fusion, ops::CUDNNConvInceptionFusionOpKernel<float>, ops::CUDNNConvInceptionFusionOpKernel<double>); #endif
2dbeaf72f00a7efc593d19bb63ce15ea2ed778ea.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/conv_cudnn_op_cache.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" namespace paddle { namespace operators { #if CUDNN_VERSION >= 7100 using Tensor = framework::Tensor; using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; using ScopedFilterDescriptor = platform::ScopedFilterDescriptor; using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor; using ScopedActivationDescriptor = platform::ScopedActivationDescriptor; using DataLayout = platform::DataLayout; using ScopedPoolingDescriptor = platform::ScopedPoolingDescriptor; using PoolingMode = platform::PoolingMode; template <typename T> using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType; template <typename T> using CudnnDataType = platform::CudnnDataType<T>; template <typename T> class CUDNNConvInceptionFusionOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.template device_context<phi::GPUContext>(); auto* input = ctx.Input<Tensor>("Input"); auto filters = ctx.MultiInput<framework::Tensor>("Filter"); auto bias = ctx.MultiInput<framework::Tensor>("Bias"); auto* output = ctx.Output<Tensor>("Output"); auto temp_outs = ctx.MultiOutput<framework::Tensor>("TempOutput"); const std::string pool_type = ctx.Attr<std::string>("pooling_type"); const std::string activation = ctx.Attr<std::string>("activation"); const bool exclusive = ctx.Attr<bool>("exclusive"); int64_t user_workspace_size = static_cast<size_t>(ctx.Attr<int>("workspace_size_MB")); const T* input_data = input->data<T>(); T* output_data = dev_ctx.Alloc<T>(output, output->numel() * sizeof(T)); temp_outs[0]->Resize(input->dims()); T* temp_data = dev_ctx.Alloc<T>(temp_outs[0], temp_outs[0]->numel() * sizeof(T)); DataLayout layout = DataLayout::kNCHW; std::vector<int> in_dim = phi::vectorize<int>(input->dims()); // ------------------- cudnn descriptors --------------------- PoolingMode pooling_mode; if (pool_type == "max") { pooling_mode = PoolingMode::kMaximum; } else { pooling_mode = exclusive ? 
PoolingMode::kAverageExclusive : (PoolingMode::kAverageInclusive); } std::vector<int> k0x0 = {0, 0}; std::vector<int> k1x1 = {1, 1}; std::vector<int> k1x1_2 = {1, 1}; std::vector<int> k3x3 = {3, 3}; ScopedPoolingDescriptor pool_desc; ScopedActivationDescriptor act_desc; ScopedTensorDescriptor out_pool_desc; ScopedTensorDescriptor input_desc; cudnnPoolingDescriptor_t cudnn_pool_desc = pool_desc.descriptor(pooling_mode, k3x3, k1x1, k1x1); cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(layout, phi::vectorize<int>(input->dims())); cudnnTensorDescriptor_t pool_out_desc = out_pool_desc.descriptor<T>(layout, phi::vectorize<int>(input->dims())); cudnnDataType_t cudnn_dtype = CudnnDataType<T>::type; cudnnTensorDescriptor_t* out_desc = new cudnnTensorDescriptor_t[4]; cudnnFilterDescriptor_t* filter_desc = new cudnnFilterDescriptor_t[4]; cudnnTensorDescriptor_t* bias_desc = new cudnnTensorDescriptor_t[4]; cudnnTensorDescriptor_t* in_desc = new cudnnTensorDescriptor_t[4]; cudnnConvolutionDescriptor_t* conv_desc = new cudnnConvolutionDescriptor_t[4]; for (int i = 0; i < 4; ++i) { PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateFilterDescriptor(&filter_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bias_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&in_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&out_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateConvolutionDescriptor(&conv_desc[i])); } std::vector<std::vector<int>> filter_dims; std::vector<std::vector<int>> bias_dims; std::vector<std::vector<int>> in_dims; std::vector<std::vector<int>> out_dims; std::vector<std::vector<int>> in_strides; std::vector<std::vector<int>> out_strides; std::vector<std::vector<int>> bias_strides; cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW; int n = in_dim[0]; int h = in_dim[2]; int w = in_dim[3]; int oc = output->dims()[1]; cudnnDataType_t compute_type = (cudnn_dtype == CUDNN_DATA_DOUBLE) ? 
CUDNN_DATA_DOUBLE : CUDNN_DATA_FLOAT; for (int i = 0; i < 4; ++i) { filter_dims.push_back(phi::vectorize<int>(filters[i]->dims())); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetFilterNdDescriptor( filter_desc[i], cudnn_dtype, format, 4, filter_dims[i].data())); bias_dims.push_back({1, filter_dims[i][0], 1, 1}); bias_strides.push_back({filter_dims[i][0], 1, 1, 1}); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( bias_desc[i], cudnn_dtype, 4, bias_dims[i].data(), bias_strides[i].data())); in_dims.push_back({n, filter_dims[i][1], h, w}); out_dims.push_back({n, filter_dims[i][0], h, w}); in_strides.push_back({filter_dims[i][1] * h * w, h * w, w, 1}); out_strides.push_back({oc * h * w, h * w, w, 1}); if (i < 2) { PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetConvolutionNdDescriptor( conv_desc[i], 2, k0x0.data(), k1x1.data(), k1x1.data(), CUDNN_CROSS_CORRELATION, compute_type)); } else { PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetConvolutionNdDescriptor( conv_desc[i], 2, k1x1.data(), k1x1.data(), k1x1.data(), CUDNN_CROSS_CORRELATION, compute_type)); } PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType( conv_desc[i], CUDNN_DEFAULT_MATH)); #if CUDA_VERSION >= 11000 && CUDNN_VERSION >= 8000 if (!platform::allow_tf32_cudnn) { PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetConvolutionMathType(conv_desc[i], CUDNN_FMA_MATH)); } #endif // CUDA_VERSION >= 11000 && CUDNN_VERSION >= 8000 } in_dims[2][1] *= 2; in_strides[2][0] = oc * h * w; out_strides[2][0] = filter_dims[2][0] * h * w; // this out is continuous. in_strides[3][0] = filter_dims[2][0] * h * w; PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetConvolutionGroupCount(conv_desc[2], 2)); cudnnConvolutionFwdAlgo_t algo[4]; auto handle = dev_ctx.cudnn_handle(); size_t workspace_size_in_bytes = 0; // final workspace to allocate. 
size_t workspace_size_limit = 0; if (FLAGS_conv_workspace_size_limit > 0 || user_workspace_size > 0) { int64_t max_user_size = std::min(static_cast<int64_t>(FLAGS_conv_workspace_size_limit), user_workspace_size); workspace_size_limit = max_user_size * 1024 * 1024; } for (int i = 0; i < 4; ++i) { PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( in_desc[i], cudnn_dtype, 4, in_dims[i].data(), in_strides[i].data())); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnSetTensorNdDescriptor(out_desc[i], cudnn_dtype, 4, out_dims[i].data(), out_strides[i].data())); int perf_count; int best_algo_idx = 0; size_t tmp_size = 0; std::unique_ptr<cudnnConvolutionFwdAlgoPerf_t[]> perf_results( new cudnnConvolutionFwdAlgoPerf_t[kNUM_CUDNN_FWD_ALGS]); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionForwardAlgorithm_v7( handle, in_desc[i], filter_desc[i], conv_desc[i], out_desc[i], kNUM_CUDNN_FWD_ALGS, &perf_count, perf_results.get())); algo[i] = (perf_results.get())[best_algo_idx].algo; PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( handle, in_desc[i], filter_desc[i], conv_desc[i], out_desc[i], algo[i], &tmp_size)); workspace_size_in_bytes = std::max(workspace_size_in_bytes, tmp_size); } cudnnActivationDescriptor_t cudnn_act_desc = act_desc.descriptor<T>(activation); int oc0 = filter_dims[0][0]; int oc1 = filter_dims[1][0] - filter_dims[2][1] * 2; int oc3 = filter_dims[3][0]; int oc2 = oc - oc0 - oc1 - oc3; // branch1: pool + 1x1 conv ScalingParamType<T> alpha = 1.0f, beta = 0.0f; PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnPoolingForward(handle, cudnn_pool_desc, &alpha, cudnn_input_desc, input_data, &beta, pool_out_desc, temp_data)); std::vector<const void*> in_datas; in_datas.push_back(static_cast<const void*>(temp_data)); in_datas.push_back(static_cast<const void*>(input_data)); in_datas.push_back( static_cast<const void*>(output_data + (oc0 + oc1) * h * w)); temp_outs[1]->Resize(phi::make_ddim(out_dims[2])); T* temp2_data = dev_ctx.Alloc<T>(temp_outs[1], temp_outs[1]->numel() * sizeof(T)); in_datas.push_back(static_cast<const void*>(temp2_data + oc2 * h * w)); std::vector<void*> out_datas; out_datas.push_back(static_cast<void*>(output_data)); out_datas.push_back(static_cast<void*>(output_data + oc0 * h * w)); out_datas.push_back(static_cast<void*>(temp2_data)); out_datas.push_back( static_cast<void*>(output_data + (oc0 + oc1 + oc2) * h * w)); for (int i = 0; i < 4; ++i) { auto func = [&](void* cudnn_workspace) { PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnConvolutionBiasActivationForward( handle, &alpha, in_desc[i], in_datas[i], filter_desc[i], static_cast<const void*>(filters[i]->data<T>()), conv_desc[i], algo[i], cudnn_workspace, workspace_size_in_bytes, &beta, out_desc[i], out_datas[i], bias_desc[i], static_cast<const void*>(bias[i]->data<T>()), cudnn_act_desc, out_desc[i], out_datas[i])); }; auto workspace_handle = dev_ctx.cudnn_workspace_handle(); workspace_handle.RunFunc(func, workspace_size_in_bytes); } cudnnTensorDescriptor_t x_desc; cudnnTensorDescriptor_t y_desc; PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&x_desc)); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&y_desc)); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( x_desc, cudnn_dtype, 4, out_dims[3].data(), out_strides[2].data())); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( y_desc, cudnn_dtype, 4, out_dims[3].data(), 
out_strides[3].data())); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnTransformTensor( handle, CudnnDataType<T>::kOne(), x_desc, static_cast<const void*>(out_datas[2]), CudnnDataType<T>::kZero(), y_desc, static_cast<void*>(output_data + (oc0 + oc1) * h * w))); for (int i = 0; i < 4; ++i) { PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(in_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(out_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyFilterDescriptor(filter_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bias_desc[i])); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyConvolutionDescriptor(conv_desc[i])); } PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(x_desc)); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(y_desc)); } }; #endif } // namespace operators } // namespace paddle #if CUDNN_VERSION >= 7100 namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(conv2d_inception_fusion, ops::CUDNNConvInceptionFusionOpKernel<float>, ops::CUDNNConvInceptionFusionOpKernel<double>); #endif
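Both versions of the fused inception kernel above size a single workspace by taking the maximum of the per-branch forward-convolution workspace requirements and then reuse that one allocation for all four conv+bias+activation calls. A minimal hedged sketch of that pattern in isolation; descriptors and algorithms are assumed to be already configured, and the helper name and reduced error handling are illustrative, not Paddle code.

#include <cudnn.h>
#include <cuda_runtime.h>
#include <algorithm>

#define CHECK_CUDNN(x) do { if ((x) != CUDNN_STATUS_SUCCESS) return nullptr; } while (0)

void* alloc_shared_workspace(cudnnHandle_t handle,
                             int n_branches,
                             const cudnnTensorDescriptor_t* in_desc,
                             const cudnnFilterDescriptor_t* filter_desc,
                             const cudnnConvolutionDescriptor_t* conv_desc,
                             const cudnnTensorDescriptor_t* out_desc,
                             const cudnnConvolutionFwdAlgo_t* algo,
                             size_t* out_bytes)
{
    *out_bytes = 0;
    size_t needed = 0;
    for (int i = 0; i < n_branches; ++i) {
        size_t tmp = 0;
        CHECK_CUDNN(cudnnGetConvolutionForwardWorkspaceSize(
            handle, in_desc[i], filter_desc[i], conv_desc[i], out_desc[i], algo[i], &tmp));
        needed = std::max(needed, tmp);   // keep the largest per-branch requirement
    }
    void* ws = nullptr;
    if (needed > 0 && cudaMalloc(&ws, needed) != cudaSuccess) return nullptr;
    *out_bytes = needed;
    return ws;   // every branch can run with this single workspace
}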
31f06747d75208023cccb2b3aae43e8edb25196c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CuComponent.cuh" #include "CuInterpolation.h" #include <stdio.h> namespace HBXDef { template<unsigned int T, HBXDef::CudaMalloc_t M > __global__ void cutestlag1( HBXDef::CuInterpolation<T, M>* _ArrayIn, HBXDef::UserDefFloat* _outData ) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; while ( idx < MAX_GPUITER ) { #ifdef USERDEFSHARED// to control the shared-memory footprint #endif // USERDEFSHARED// to control the shared-memory footprint #ifdef WITHTEST // test case: generate data if the input pointer is null // std::cerr<<"input pointer is null..."<<std::endl; #endif HBXDef::UserDefFloat intercord[4]; for (size_t i = 0; i < RUNCLC; i++) { intercord[0] = -3.0 + i * 20.0 / RUNCLC + 20.0 * i / MAX_GPUITER; intercord[1] = 0.2 + i * 2.0 / RUNCLC + 2.0 * i / MAX_GPUITER; intercord[2] = -30 + i * 30.0 / RUNCLC + 30 * i / MAX_GPUITER; intercord[3] = -5.0 + 5*i / RUNCLC + i*5 / MAX_GPUITER; _ArrayIn[idx].ReadTableData(intercord); } idx += gridDim.x*blockDim.x; } __syncthreads(); } }
31f06747d75208023cccb2b3aae43e8edb25196c.cu
#include "CuComponent.cuh" #include "CuInterpolation.h" #include <stdio.h> namespace HBXDef { template<unsigned int T, HBXDef::CudaMalloc_t M > __global__ void cutestlag1( HBXDef::CuInterpolation<T, M>* _ArrayIn, HBXDef::UserDefFloat* _outData ) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; while ( idx < MAX_GPUITER ) { #ifdef USERDEFSHARED// to control the shared-memory footprint #endif // USERDEFSHARED// to control the shared-memory footprint #ifdef WITHTEST // test case: generate data if the input pointer is null // std::cerr<<"input pointer is null..."<<std::endl; #endif HBXDef::UserDefFloat intercord[4]; for (size_t i = 0; i < RUNCLC; i++) { intercord[0] = -3.0 + i * 20.0 / RUNCLC + 20.0 * i / MAX_GPUITER; intercord[1] = 0.2 + i * 2.0 / RUNCLC + 2.0 * i / MAX_GPUITER; intercord[2] = -30 + i * 30.0 / RUNCLC + 30 * i / MAX_GPUITER; intercord[3] = -5.0 + 5*i / RUNCLC + i*5 / MAX_GPUITER; _ArrayIn[idx].ReadTableData(intercord); } idx += gridDim.x*blockDim.x; } __syncthreads(); } }
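Both variants of cutestlag1 above use a grid-stride loop: a thread starts at its global index and repeatedly advances by gridDim.x*blockDim.x until it passes MAX_GPUITER, so a fixed-size grid covers any number of work items. A minimal self-contained illustration of the same pattern (hypothetical kernel, not part of the HBXDef project):

#include <cuda_runtime.h>

__global__ void scale_grid_stride(float* data, float factor, size_t n)
{
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += (size_t)gridDim.x * blockDim.x) {
        data[i] *= factor;   // every element is visited exactly once
    }
}

// Example launch: a fixed, modest grid still processes all n elements.
// scale_grid_stride<<<64, 256>>>(d_data, 2.0f, n);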
b1f2d2e4763797bdcb531ed5a8066ecef7803cd2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void vecAdd(float * in1, float * in2, float * out, int len) { register int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < len) out[i] = in1[i] + in2[i]; }
b1f2d2e4763797bdcb531ed5a8066ecef7803cd2.cu
#include "includes.h" __global__ void vecAdd(float * in1, float * in2, float * out, int len) { register int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < len) out[i] = in1[i] + in2[i]; }
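vecAdd above is only the kernel; a hedged sketch of a host-side driver for it follows (buffer sizes, test values and the run_vec_add helper are assumptions for illustration, not part of the original project):

#include <cuda_runtime.h>
#include <vector>

__global__ void vecAdd(float* in1, float* in2, float* out, int len);   // defined above

int run_vec_add(int len)
{
    std::vector<float> a(len, 1.0f), b(len, 2.0f), c(len, 0.0f);
    float *d_a, *d_b, *d_c;
    size_t bytes = len * sizeof(float);
    cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes);
    cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b.data(), bytes, cudaMemcpyHostToDevice);
    int block = 256;
    int grid = (len + block - 1) / block;        // round up so every element is covered
    vecAdd<<<grid, block>>>(d_a, d_b, d_c, len);
    cudaMemcpy(c.data(), d_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return c[0] == 3.0f ? 0 : 1;                 // crude sanity check
}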
0ddb730810a6667b441762991945db8bb0ebdd4c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* *To find sum of two large matrices *Compute the speed-up obtained by the GPU */ #include <stdio.h> const int ROW_SIZE = 300, COL_SIZE = 400; const int MATRIX_BYTES = ROW_SIZE * COL_SIZE * sizeof(int); //kernel __global__ void sum(int d_sum[][ROW_SIZE], int d_a[][ROW_SIZE], int d_b[][ROW_SIZE]) { d_sum[blockIdx.x][threadIdx.x] = d_a[blockIdx.x][threadIdx.x] + d_b[blockIdx.x][threadIdx.x]; } //to check the final result int checkSum(int h_a[][ROW_SIZE], int h_b[][ROW_SIZE], int h_sum[][ROW_SIZE]) { int flag = 1; for(int i = 0; i < COL_SIZE; i++) { for(int j = 0; j < ROW_SIZE; j++) { if(h_sum[i][j] != h_a[i][j] + h_b[i][j]) { flag = 0; break; } } } return flag; } int main() { int h_a[COL_SIZE][ROW_SIZE], h_b[COL_SIZE][ROW_SIZE], h_sum[COL_SIZE][ROW_SIZE]; for(int i = 0; i < COL_SIZE; i++) { for(int j = 0; j < ROW_SIZE; j++) { h_a[i][j] = ((int)rand())%1000; h_b[i][j] = ((int)rand())%1000; } } int (*d_a)[ROW_SIZE], (*d_b)[ROW_SIZE], (*d_sum)[ROW_SIZE]; hipMalloc((void**) &d_a, MATRIX_BYTES); hipMalloc((void**) &d_b, MATRIX_BYTES); hipMalloc((void**) &d_sum, MATRIX_BYTES); hipMemcpy(d_a, h_a, MATRIX_BYTES, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, MATRIX_BYTES, hipMemcpyHostToDevice); hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( sum), dim3(COL_SIZE), dim3(ROW_SIZE), 0, 0, d_sum, d_a, d_b); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipMemcpy(h_sum, d_sum, MATRIX_BYTES, hipMemcpyDeviceToHost); if(checkSum(h_a, h_b, h_sum)) { printf("The result is computed successfully!\n"); hipEventElapsedTime(&time, start, stop); printf("Computation time taken by device: %f\n", time); hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); for(int i = 0; i < COL_SIZE; i++) { for(int j = 0; j < ROW_SIZE; j++) { h_sum[i][j] = h_a[i][j] + h_b[i][j]; } } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("Computation time taken by host: %f\n", time); } else { printf("The result is not computed correctly!"); } hipFree(d_a); hipFree(d_b); hipFree(d_sum); return 0; }
0ddb730810a6667b441762991945db8bb0ebdd4c.cu
/* *To find sum of two large matrices *Compute the speed-up obtained by the GPU */ #include <stdio.h> const int ROW_SIZE = 300, COL_SIZE = 400; const int MATRIX_BYTES = ROW_SIZE * COL_SIZE * sizeof(int); //kernel __global__ void sum(int d_sum[][ROW_SIZE], int d_a[][ROW_SIZE], int d_b[][ROW_SIZE]) { d_sum[blockIdx.x][threadIdx.x] = d_a[blockIdx.x][threadIdx.x] + d_b[blockIdx.x][threadIdx.x]; } //to check the final result int checkSum(int h_a[][ROW_SIZE], int h_b[][ROW_SIZE], int h_sum[][ROW_SIZE]) { int flag = 1; for(int i = 0; i < COL_SIZE; i++) { for(int j = 0; j < ROW_SIZE; j++) { if(h_sum[i][j] != h_a[i][j] + h_b[i][j]) { flag = 0; break; } } } return flag; } int main() { int h_a[COL_SIZE][ROW_SIZE], h_b[COL_SIZE][ROW_SIZE], h_sum[COL_SIZE][ROW_SIZE]; for(int i = 0; i < COL_SIZE; i++) { for(int j = 0; j < ROW_SIZE; j++) { h_a[i][j] = ((int)rand())%1000; h_b[i][j] = ((int)rand())%1000; } } int (*d_a)[ROW_SIZE], (*d_b)[ROW_SIZE], (*d_sum)[ROW_SIZE]; cudaMalloc((void**) &d_a, MATRIX_BYTES); cudaMalloc((void**) &d_b, MATRIX_BYTES); cudaMalloc((void**) &d_sum, MATRIX_BYTES); cudaMemcpy(d_a, h_a, MATRIX_BYTES, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, MATRIX_BYTES, cudaMemcpyHostToDevice); cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); sum<<<COL_SIZE, ROW_SIZE>>>(d_sum, d_a, d_b); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaMemcpy(h_sum, d_sum, MATRIX_BYTES, cudaMemcpyDeviceToHost); if(checkSum(h_a, h_b, h_sum)) { printf("The result is computed successfully!\n"); cudaEventElapsedTime(&time, start, stop); printf("Computation time taken by device: %f\n", time); cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); for(int i = 0; i < COL_SIZE; i++) { for(int j = 0; j < ROW_SIZE; j++) { h_sum[i][j] = h_a[i][j] + h_b[i][j]; } } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("Computation time taken by host: %f\n", time); } else { printf("The result is not computed correctly!"); } cudaFree(d_a); cudaFree(d_b); cudaFree(d_sum); return 0; }
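The kernel above maps one thread block per row (COL_SIZE blocks) and one thread per column (ROW_SIZE threads), which works here only because ROW_SIZE = 300 stays under the 1024-threads-per-block limit. A hedged variant with a 2D grid of fixed 16x16 tiles removes that limit and accepts the dimensions at run time; sum2d and its launch are illustrative, not part of the original program.

#include <cuda_runtime.h>

__global__ void sum2d(const int* a, const int* b, int* out, int rows, int cols)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < rows && col < cols) {
        int idx = row * cols + col;          // row-major flattened index
        out[idx] = a[idx] + b[idx];
    }
}

// Launch sketch:
// dim3 block(16, 16);
// dim3 grid((cols + 15) / 16, (rows + 15) / 16);
// sum2d<<<grid, block>>>(d_a, d_b, d_sum, rows, cols);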
1dc475d8c431185868a843737f45bbab1e00fc62.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void delta_hidden ( float * prime_ji, float * delta_i ) { // X grid is size_i int x = blockIdx.x * blockDim.x + threadIdx.x; // δ[i] = f'( Σ[ji]) * Σ(w[ik] * δ[k]) // NOTE: delta_i ALREADY contains `Σ(w[ik] * δ[k])` float rhs = delta_i[x]; // δ[i] = σ'( Σ[ji]) * Σ(w[ik] * δ[k]) delta_i[x] = __fmul_rz( prime_ji[x], rhs ); }
1dc475d8c431185868a843737f45bbab1e00fc62.cu
#include "includes.h" __global__ void delta_hidden ( float * prime_ji, float * delta_i ) { // X grid is size_i int x = blockIdx.x * blockDim.x + threadIdx.x; // δ[i] = f'( Σ[ji]) * Σ(w[ik] * δ[k]) // NOTE: delta_i ALREADY contains `Σ(w[ik] * δ[k])` float rhs = delta_i[x]; // δ[i] = σ'( Σ[ji]) * Σ(w[ik] * δ[k]) delta_i[x] = __fmul_rz( prime_ji[x], rhs ); }
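delta_hidden above assumes delta_i already holds the weighted sum Σ(w[ik] * δ[k]) coming from the layer after it; the kernel then multiplies element-wise by the stored derivative f'(Σ[ji]). A hedged sketch of one simple, unoptimized way to produce that weighted sum first, with one thread per hidden unit; the row-major weight layout and all names are assumptions, not taken from the original project.

#include <cuda_runtime.h>

__global__ void weighted_delta_sum(const float* w,        // [size_i * size_k], row-major
                                   const float* delta_k,  // deltas of the next layer
                                   float* delta_i,        // output: Σ(w[ik] * δ[k])
                                   int size_i, int size_k)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size_i) return;
    float sum = 0.0f;
    for (int k = 0; k < size_k; ++k)
        sum += w[i * size_k + k] * delta_k[k];
    delta_i[i] = sum;     // delta_hidden then multiplies this by f'(Σ[ji])
}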
e3bf0d1cb1f6b5e5ac3a7d5009865a168aa85433.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #define BLOCK 16 __global__ void quad(float *a, int n, float *u, float *v) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (row < n && col < n && col >= row) { float sum = u[col]*a[row*n+col]*u[row]; if (col == row) atomicAdd(v, sum); else atomicAdd(v, 2*sum); } } float gpuquad(float *a, int n, float *u) { float *da, *du, *dv; float v = 0; hipMalloc((void **)&da, n * n * sizeof(float)); hipMalloc((void **)&du, n * sizeof(float)); hipMalloc((void **)&dv, sizeof(float)); hipMemcpy(da, a, n * n * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(du, u, n * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dv, &v, sizeof(float), hipMemcpyHostToDevice); int size = (n+BLOCK-1) / BLOCK; dim3 dimGrid(size, size); dim3 dimBlock(BLOCK, BLOCK); hipLaunchKernelGGL(( quad), dim3(dimGrid), dim3(dimBlock), 0, 0, da, n, du, dv); hipMemcpy(&v, dv, sizeof(float), hipMemcpyDeviceToHost); hipFree(da); hipFree(du); hipFree(dv); return v; }
e3bf0d1cb1f6b5e5ac3a7d5009865a168aa85433.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #define BLOCK 16 __global__ void quad(float *a, int n, float *u, float *v) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (row < n && col < n && col >= row) { float sum = u[col]*a[row*n+col]*u[row]; if (col == row) atomicAdd(v, sum); else atomicAdd(v, 2*sum); } } float gpuquad(float *a, int n, float *u) { float *da, *du, *dv; float v = 0; cudaMalloc((void **)&da, n * n * sizeof(float)); cudaMalloc((void **)&du, n * sizeof(float)); cudaMalloc((void **)&dv, sizeof(float)); cudaMemcpy(da, a, n * n * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(du, u, n * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dv, &v, sizeof(float), cudaMemcpyHostToDevice); int size = (n+BLOCK-1) / BLOCK; dim3 dimGrid(size, size); dim3 dimBlock(BLOCK, BLOCK); quad<<<dimGrid, dimBlock>>>(da, n, du, dv); cudaMemcpy(&v, dv, sizeof(float), cudaMemcpyDeviceToHost); cudaFree(da); cudaFree(du); cudaFree(dv); return v; }
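gpuquad above evaluates the quadratic form u^T A u for a symmetric A by visiting only the upper triangle (col >= row) and doubling the off-diagonal contributions before accumulating with atomicAdd. A hedged host-side usage sketch that checks the result against a straightforward CPU loop; the 3x3 test data is made up for illustration.

#include <stdio.h>

float gpuquad(float* a, int n, float* u);   // defined above

int main(void)
{
    const int n = 3;
    float a[n * n] = {2, 1, 0,
                      1, 3, 1,
                      0, 1, 4};              // symmetric, row-major
    float u[n] = {1, 2, -1};
    float cpu = 0;
    for (int r = 0; r < n; ++r)
        for (int c = 0; c < n; ++c)
            cpu += u[r] * a[r * n + c] * u[c];
    float gpu = gpuquad(a, n, u);
    printf("cpu=%f gpu=%f\n", cpu, gpu);     // both should print the same value
    return 0;
}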
017134b7d483917d2c082bb9cf5899971b5edc8d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: joaander #include "TwoStepBDGPU.cuh" #include "hoomd/VectorMath.h" #include "hoomd/HOOMDMath.h" #include "hoomd/Saru.h" using namespace hoomd; #include <assert.h> /*! \file TwoSteBDGPU.cu \brief Defines GPU kernel code for Brownian integration on the GPU. Used by TwoStepBDGPU. */ //! Shared memory array for gpu_brownian_step_one_kernel() extern __shared__ Scalar s_gammas[]; //! Shared memory array for gpu_brownian_step_one_kernel() extern __shared__ Scalar3 s_gammas_r[]; //! Takes the second half-step forward in the Langevin integration on a group of particles with /*! \param d_pos array of particle positions and types \param d_vel array of particle positions and masses \param d_image array of particle images \param box simulation box \param d_diameter array of particle diameters \param d_tag array of particle tags \param d_group_members Device array listing the indices of the members of the group to integrate \param nwork Number of group members to process on this GPU \param d_net_force Net force on each particle \param d_gamma_r List of per-type gamma_rs (rotational drag coeff.) \param d_orientation Device array of orientation quaternion \param d_torque Device array of net torque on each particle \param d_inertia Device array of moment of inertial of each particle \param d_angmom Device array of transformed angular momentum quaternion of each particle (see online documentation) \param d_gamma List of per-type gammas \param n_types Number of particle types in the simulation \param use_lambda If true, gamma = lambda * diameter \param lambda Scale factor to convert diameter to lambda (when use_lambda is true) \param timestep Current timestep of the simulation \param seed User chosen random number seed \param T Temperature set point \param aniso If set true, the system would go through rigid body updates for its orientation \param deltaT Amount of real time to step forward in one time step \param D Dimensionality of the system \param d_noiseless_t If set true, there will be no translational noise (random force) \param d_noiseless_r If set true, there will be no rotational noise (random torque) \param offset Offset of this GPU into group indices This kernel is implemented in a very similar manner to gpu_nve_step_one_kernel(), see it for design details. Random number generation is done per thread with Saru's 3-seed constructor. The seeds are, the time step, the particle tag, and the user-defined seed. 
This kernel must be launched with enough dynamic shared memory per block to read in d_gamma */ extern "C" __global__ void gpu_brownian_step_one_kernel(Scalar4 *d_pos, Scalar4 *d_vel, int3 *d_image, const BoxDim box, const Scalar *d_diameter, const unsigned int *d_tag, const unsigned int *d_group_members, const unsigned int nwork, const Scalar4 *d_net_force, const Scalar3 *d_gamma_r, Scalar4 *d_orientation, Scalar4 *d_torque, const Scalar3 *d_inertia, Scalar4 *d_angmom, const Scalar *d_gamma, const unsigned int n_types, const bool use_lambda, const Scalar lambda, const unsigned int timestep, const unsigned int seed, const Scalar T, const bool aniso, const Scalar deltaT, unsigned int D, const bool d_noiseless_t, const bool d_noiseless_r, const unsigned int offset) { if (!use_lambda) { // read in the gamma (1 dimensional array), stored in s_gammas[0: n_type] (Pythonic convention) for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x) { if (cur_offset + threadIdx.x < n_types) s_gammas[cur_offset + threadIdx.x] = d_gamma[cur_offset + threadIdx.x]; } __syncthreads(); } // read in the gamma_r, stored in s_gammas_r[0: n_type], which is s_gamma_r[0:n_type] for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x) { if (cur_offset + threadIdx.x < n_types) s_gammas_r[cur_offset + threadIdx.x] = d_gamma_r[cur_offset + threadIdx.x]; } __syncthreads(); // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int local_idx = blockIdx.x * blockDim.x + threadIdx.x; if (local_idx < nwork) { const unsigned int group_idx = local_idx + offset; // determine the particle to work on unsigned int idx = d_group_members[group_idx]; Scalar4 postype = d_pos[idx]; Scalar4 vel = d_vel[idx]; Scalar4 net_force = d_net_force[idx]; int3 image = d_image[idx]; // read in the tag of our particle. 
unsigned int ptag = d_tag[idx]; // compute the random force detail::Saru saru(ptag, timestep, seed); Scalar rx = saru.s<Scalar>(-1,1); Scalar ry = saru.s<Scalar>(-1,1); Scalar rz = saru.s<Scalar>(-1,1); // calculate the magnitude of the random force Scalar gamma; if (use_lambda) { // determine gamma from diameter gamma = lambda*d_diameter[idx]; } else { // determine gamma from type unsigned int typ = __scalar_as_int(postype.w); gamma = s_gammas[typ]; } // compute the bd force (the extra factor of 3 is because <rx^2> is 1/3 in the uniform -1,1 distribution // it is not the dimensionality of the system Scalar coeff = fast::sqrt(Scalar(3.0)*Scalar(2.0)*gamma*T/deltaT); if (d_noiseless_t) coeff = Scalar(0.0); Scalar Fr_x = rx*coeff; Scalar Fr_y = ry*coeff; Scalar Fr_z = rz*coeff; if (D < 3) Fr_z = Scalar(0.0); // update position postype.x += (net_force.x + Fr_x) * deltaT / gamma; postype.y += (net_force.y + Fr_y) * deltaT / gamma; postype.z += (net_force.z + Fr_z) * deltaT / gamma; // particles may have been moved slightly outside the box by the above steps, wrap them back into place box.wrap(postype, image); // draw a new random velocity for particle j Scalar mass = vel.w; Scalar sigma = fast::sqrt(T/mass); vel.x = gaussian_rng(saru, sigma); vel.y = gaussian_rng(saru, sigma); if (D > 2) vel.z = gaussian_rng(saru, sigma); else vel.z = 0; // write out data d_pos[idx] = postype; d_vel[idx] = vel; d_image[idx] = image; // rotational random force and orientation quaternion updates if (aniso) { unsigned int type_r = __scalar_as_int(d_pos[idx].w); // gamma_r is stored in the second half of s_gammas a.k.a s_gammas_r Scalar3 gamma_r = s_gammas_r[type_r]; if (gamma_r.x > 0 || gamma_r.y > 0 || gamma_r.z > 0) { vec3<Scalar> p_vec; quat<Scalar> q(d_orientation[idx]); vec3<Scalar> t(d_torque[idx]); vec3<Scalar> I(d_inertia[idx]); // check if the shape is degenerate bool x_zero, y_zero, z_zero; x_zero = (I.x < EPSILON); y_zero = (I.y < EPSILON); z_zero = (I.z < EPSILON); Scalar3 sigma_r = make_scalar3(fast::sqrt(Scalar(2.0)*gamma_r.x*T/deltaT), fast::sqrt(Scalar(2.0)*gamma_r.y*T/deltaT), fast::sqrt(Scalar(2.0)*gamma_r.z*T/deltaT)); if (d_noiseless_r) sigma_r = make_scalar3(0,0,0); // original Gaussian random torque // Gaussian random distribution is preferred in terms of preserving the exact math vec3<Scalar> bf_torque; bf_torque.x = gaussian_rng(saru, sigma_r.x); bf_torque.y = gaussian_rng(saru, sigma_r.y); bf_torque.z = gaussian_rng(saru, sigma_r.z); if (x_zero) bf_torque.x = 0; if (y_zero) bf_torque.y = 0; if (z_zero) bf_torque.z = 0; // use the damping by gamma_r and rotate back to lab frame // For Future Updates: take special care when have anisotropic gamma_r bf_torque = rotate(q, bf_torque); if (D < 3) { bf_torque.x = 0; bf_torque.y = 0; t.x = 0; t.y = 0; } // do the integration for quaternion q += Scalar(0.5) * deltaT * ((t + bf_torque) / vec3<Scalar>(gamma_r)) * q ; q = q * (Scalar(1.0) / slow::sqrt(norm2(q))); d_orientation[idx] = quat_to_scalar4(q); // draw a new random ang_mom for particle j in body frame p_vec.x = gaussian_rng(saru, fast::sqrt(T * I.x)); p_vec.y = gaussian_rng(saru, fast::sqrt(T * I.y)); p_vec.z = gaussian_rng(saru, fast::sqrt(T * I.z)); if (x_zero) p_vec.x = 0; if (y_zero) p_vec.y = 0; if (z_zero) p_vec.z = 0; // !! Note this ang_mom isn't well-behaving in 2D, // !! because may have effective non-zero ang_mom in x,y // store ang_mom quaternion quat<Scalar> p = Scalar(2.0) * q * p_vec; d_angmom[idx] = quat_to_scalar4(p); } } } } /*! 
\param d_pos array of particle positions and types \param d_vel array of particle positions and masses \param d_image array of particle images \param box simulation box \param d_diameter array of particle diameters \param d_tag array of particle tags \param d_group_members Device array listing the indices of the members of the group to integrate \param group_size Number of members in the group \param d_net_force Net force on each particle \param d_gamma_r List of per-type gamma_rs (rotational drag coeff.) \param d_orientation Device array of orientation quaternion \param d_torque Device array of net torque on each particle \param d_inertia Device array of moment of inertial of each particle \param d_angmom Device array of transformed angular momentum quaternion of each particle (see online documentation) \param langevin_args Collected arguments for gpu_brownian_step_one_kernel() \param aniso If set true, the system would go through rigid body updates for its orientation \param deltaT Amount of real time to step forward in one time step \param D Dimensionality of the system \param d_noiseless_t If set true, there will be no translational noise (random force) \param d_noiseless_r If set true, there will be no rotational noise (random torque) This is just a driver for gpu_brownian_step_one_kernel(), see it for details. */ hipError_t gpu_brownian_step_one(Scalar4 *d_pos, Scalar4 *d_vel, int3 *d_image, const BoxDim& box, const Scalar *d_diameter, const unsigned int *d_tag, const unsigned int *d_group_members, const unsigned int group_size, const Scalar4 *d_net_force, const Scalar3 *d_gamma_r, Scalar4 *d_orientation, Scalar4 *d_torque, const Scalar3 *d_inertia, Scalar4 *d_angmom, const langevin_step_two_args& langevin_args, const bool aniso, const Scalar deltaT, const unsigned int D, const bool d_noiseless_t, const bool d_noiseless_r, const GPUPartition& gpu_partition ) { unsigned int run_block_size = 256; // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; // setup the grid to run the kernel dim3 grid( (nwork/run_block_size) + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // run the kernel hipLaunchKernelGGL(( gpu_brownian_step_one_kernel), dim3(grid), dim3(threads), (unsigned int)(sizeof(Scalar)*langevin_args.n_types + sizeof(Scalar3)*langevin_args.n_types), 0, d_pos, d_vel, d_image, box, d_diameter, d_tag, d_group_members, nwork, d_net_force, d_gamma_r, d_orientation, d_torque, d_inertia, d_angmom, langevin_args.d_gamma, langevin_args.n_types, langevin_args.use_lambda, langevin_args.lambda, langevin_args.timestep, langevin_args.seed, langevin_args.T, aniso, deltaT, D, d_noiseless_t, d_noiseless_r, range.first); } return hipSuccess; }
017134b7d483917d2c082bb9cf5899971b5edc8d.cu
// Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: joaander #include "TwoStepBDGPU.cuh" #include "hoomd/VectorMath.h" #include "hoomd/HOOMDMath.h" #include "hoomd/Saru.h" using namespace hoomd; #include <assert.h> /*! \file TwoSteBDGPU.cu \brief Defines GPU kernel code for Brownian integration on the GPU. Used by TwoStepBDGPU. */ //! Shared memory array for gpu_brownian_step_one_kernel() extern __shared__ Scalar s_gammas[]; //! Shared memory array for gpu_brownian_step_one_kernel() extern __shared__ Scalar3 s_gammas_r[]; //! Takes the second half-step forward in the Langevin integration on a group of particles with /*! \param d_pos array of particle positions and types \param d_vel array of particle positions and masses \param d_image array of particle images \param box simulation box \param d_diameter array of particle diameters \param d_tag array of particle tags \param d_group_members Device array listing the indices of the members of the group to integrate \param nwork Number of group members to process on this GPU \param d_net_force Net force on each particle \param d_gamma_r List of per-type gamma_rs (rotational drag coeff.) \param d_orientation Device array of orientation quaternion \param d_torque Device array of net torque on each particle \param d_inertia Device array of moment of inertial of each particle \param d_angmom Device array of transformed angular momentum quaternion of each particle (see online documentation) \param d_gamma List of per-type gammas \param n_types Number of particle types in the simulation \param use_lambda If true, gamma = lambda * diameter \param lambda Scale factor to convert diameter to lambda (when use_lambda is true) \param timestep Current timestep of the simulation \param seed User chosen random number seed \param T Temperature set point \param aniso If set true, the system would go through rigid body updates for its orientation \param deltaT Amount of real time to step forward in one time step \param D Dimensionality of the system \param d_noiseless_t If set true, there will be no translational noise (random force) \param d_noiseless_r If set true, there will be no rotational noise (random torque) \param offset Offset of this GPU into group indices This kernel is implemented in a very similar manner to gpu_nve_step_one_kernel(), see it for design details. Random number generation is done per thread with Saru's 3-seed constructor. The seeds are, the time step, the particle tag, and the user-defined seed. 
This kernel must be launched with enough dynamic shared memory per block to read in d_gamma */ extern "C" __global__ void gpu_brownian_step_one_kernel(Scalar4 *d_pos, Scalar4 *d_vel, int3 *d_image, const BoxDim box, const Scalar *d_diameter, const unsigned int *d_tag, const unsigned int *d_group_members, const unsigned int nwork, const Scalar4 *d_net_force, const Scalar3 *d_gamma_r, Scalar4 *d_orientation, Scalar4 *d_torque, const Scalar3 *d_inertia, Scalar4 *d_angmom, const Scalar *d_gamma, const unsigned int n_types, const bool use_lambda, const Scalar lambda, const unsigned int timestep, const unsigned int seed, const Scalar T, const bool aniso, const Scalar deltaT, unsigned int D, const bool d_noiseless_t, const bool d_noiseless_r, const unsigned int offset) { if (!use_lambda) { // read in the gamma (1 dimensional array), stored in s_gammas[0: n_type] (Pythonic convention) for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x) { if (cur_offset + threadIdx.x < n_types) s_gammas[cur_offset + threadIdx.x] = d_gamma[cur_offset + threadIdx.x]; } __syncthreads(); } // read in the gamma_r, stored in s_gammas_r[0: n_type], which is s_gamma_r[0:n_type] for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x) { if (cur_offset + threadIdx.x < n_types) s_gammas_r[cur_offset + threadIdx.x] = d_gamma_r[cur_offset + threadIdx.x]; } __syncthreads(); // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int local_idx = blockIdx.x * blockDim.x + threadIdx.x; if (local_idx < nwork) { const unsigned int group_idx = local_idx + offset; // determine the particle to work on unsigned int idx = d_group_members[group_idx]; Scalar4 postype = d_pos[idx]; Scalar4 vel = d_vel[idx]; Scalar4 net_force = d_net_force[idx]; int3 image = d_image[idx]; // read in the tag of our particle. 
unsigned int ptag = d_tag[idx]; // compute the random force detail::Saru saru(ptag, timestep, seed); Scalar rx = saru.s<Scalar>(-1,1); Scalar ry = saru.s<Scalar>(-1,1); Scalar rz = saru.s<Scalar>(-1,1); // calculate the magnitude of the random force Scalar gamma; if (use_lambda) { // determine gamma from diameter gamma = lambda*d_diameter[idx]; } else { // determine gamma from type unsigned int typ = __scalar_as_int(postype.w); gamma = s_gammas[typ]; } // compute the bd force (the extra factor of 3 is because <rx^2> is 1/3 in the uniform -1,1 distribution // it is not the dimensionality of the system Scalar coeff = fast::sqrt(Scalar(3.0)*Scalar(2.0)*gamma*T/deltaT); if (d_noiseless_t) coeff = Scalar(0.0); Scalar Fr_x = rx*coeff; Scalar Fr_y = ry*coeff; Scalar Fr_z = rz*coeff; if (D < 3) Fr_z = Scalar(0.0); // update position postype.x += (net_force.x + Fr_x) * deltaT / gamma; postype.y += (net_force.y + Fr_y) * deltaT / gamma; postype.z += (net_force.z + Fr_z) * deltaT / gamma; // particles may have been moved slightly outside the box by the above steps, wrap them back into place box.wrap(postype, image); // draw a new random velocity for particle j Scalar mass = vel.w; Scalar sigma = fast::sqrt(T/mass); vel.x = gaussian_rng(saru, sigma); vel.y = gaussian_rng(saru, sigma); if (D > 2) vel.z = gaussian_rng(saru, sigma); else vel.z = 0; // write out data d_pos[idx] = postype; d_vel[idx] = vel; d_image[idx] = image; // rotational random force and orientation quaternion updates if (aniso) { unsigned int type_r = __scalar_as_int(d_pos[idx].w); // gamma_r is stored in the second half of s_gammas a.k.a s_gammas_r Scalar3 gamma_r = s_gammas_r[type_r]; if (gamma_r.x > 0 || gamma_r.y > 0 || gamma_r.z > 0) { vec3<Scalar> p_vec; quat<Scalar> q(d_orientation[idx]); vec3<Scalar> t(d_torque[idx]); vec3<Scalar> I(d_inertia[idx]); // check if the shape is degenerate bool x_zero, y_zero, z_zero; x_zero = (I.x < EPSILON); y_zero = (I.y < EPSILON); z_zero = (I.z < EPSILON); Scalar3 sigma_r = make_scalar3(fast::sqrt(Scalar(2.0)*gamma_r.x*T/deltaT), fast::sqrt(Scalar(2.0)*gamma_r.y*T/deltaT), fast::sqrt(Scalar(2.0)*gamma_r.z*T/deltaT)); if (d_noiseless_r) sigma_r = make_scalar3(0,0,0); // original Gaussian random torque // Gaussian random distribution is preferred in terms of preserving the exact math vec3<Scalar> bf_torque; bf_torque.x = gaussian_rng(saru, sigma_r.x); bf_torque.y = gaussian_rng(saru, sigma_r.y); bf_torque.z = gaussian_rng(saru, sigma_r.z); if (x_zero) bf_torque.x = 0; if (y_zero) bf_torque.y = 0; if (z_zero) bf_torque.z = 0; // use the damping by gamma_r and rotate back to lab frame // For Future Updates: take special care when have anisotropic gamma_r bf_torque = rotate(q, bf_torque); if (D < 3) { bf_torque.x = 0; bf_torque.y = 0; t.x = 0; t.y = 0; } // do the integration for quaternion q += Scalar(0.5) * deltaT * ((t + bf_torque) / vec3<Scalar>(gamma_r)) * q ; q = q * (Scalar(1.0) / slow::sqrt(norm2(q))); d_orientation[idx] = quat_to_scalar4(q); // draw a new random ang_mom for particle j in body frame p_vec.x = gaussian_rng(saru, fast::sqrt(T * I.x)); p_vec.y = gaussian_rng(saru, fast::sqrt(T * I.y)); p_vec.z = gaussian_rng(saru, fast::sqrt(T * I.z)); if (x_zero) p_vec.x = 0; if (y_zero) p_vec.y = 0; if (z_zero) p_vec.z = 0; // !! Note this ang_mom isn't well-behaving in 2D, // !! because may have effective non-zero ang_mom in x,y // store ang_mom quaternion quat<Scalar> p = Scalar(2.0) * q * p_vec; d_angmom[idx] = quat_to_scalar4(p); } } } } /*! 
\param d_pos array of particle positions and types \param d_vel array of particle positions and masses \param d_image array of particle images \param box simulation box \param d_diameter array of particle diameters \param d_tag array of particle tags \param d_group_members Device array listing the indices of the members of the group to integrate \param group_size Number of members in the group \param d_net_force Net force on each particle \param d_gamma_r List of per-type gamma_rs (rotational drag coeff.) \param d_orientation Device array of orientation quaternion \param d_torque Device array of net torque on each particle \param d_inertia Device array of moment of inertial of each particle \param d_angmom Device array of transformed angular momentum quaternion of each particle (see online documentation) \param langevin_args Collected arguments for gpu_brownian_step_one_kernel() \param aniso If set true, the system would go through rigid body updates for its orientation \param deltaT Amount of real time to step forward in one time step \param D Dimensionality of the system \param d_noiseless_t If set true, there will be no translational noise (random force) \param d_noiseless_r If set true, there will be no rotational noise (random torque) This is just a driver for gpu_brownian_step_one_kernel(), see it for details. */ cudaError_t gpu_brownian_step_one(Scalar4 *d_pos, Scalar4 *d_vel, int3 *d_image, const BoxDim& box, const Scalar *d_diameter, const unsigned int *d_tag, const unsigned int *d_group_members, const unsigned int group_size, const Scalar4 *d_net_force, const Scalar3 *d_gamma_r, Scalar4 *d_orientation, Scalar4 *d_torque, const Scalar3 *d_inertia, Scalar4 *d_angmom, const langevin_step_two_args& langevin_args, const bool aniso, const Scalar deltaT, const unsigned int D, const bool d_noiseless_t, const bool d_noiseless_r, const GPUPartition& gpu_partition ) { unsigned int run_block_size = 256; // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; // setup the grid to run the kernel dim3 grid( (nwork/run_block_size) + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // run the kernel gpu_brownian_step_one_kernel<<< grid, threads, (unsigned int)(sizeof(Scalar)*langevin_args.n_types + sizeof(Scalar3)*langevin_args.n_types)>>> (d_pos, d_vel, d_image, box, d_diameter, d_tag, d_group_members, nwork, d_net_force, d_gamma_r, d_orientation, d_torque, d_inertia, d_angmom, langevin_args.d_gamma, langevin_args.n_types, langevin_args.use_lambda, langevin_args.lambda, langevin_args.timestep, langevin_args.seed, langevin_args.T, aniso, deltaT, D, d_noiseless_t, d_noiseless_r, range.first); } return cudaSuccess; }
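The translational part of the kernel above reduces to: draw a uniform(-1,1) random force per component scaled by sqrt(3 * 2 * gamma * T / deltaT) (the factor 3 restores unit variance, since a uniform(-1,1) draw has variance 1/3), then move the particle by (F + F_r) * deltaT / gamma. A hedged, stripped-down sketch of just that update using cuRAND instead of HOOMD's Saru generator; names and the float3 layout are illustrative, not HOOMD code.

#include <cuda_runtime.h>
#include <curand_kernel.h>

__global__ void bd_translate(float3* pos, const float3* net_force,
                             float gamma, float T, float dt,
                             unsigned int seed, unsigned int timestep, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;

    curandState st;
    curand_init(seed, i, timestep, &st);            // per-particle, per-step stream

    float coeff = sqrtf(3.0f * 2.0f * gamma * T / dt);
    float3 fr = make_float3(coeff * (2.0f * curand_uniform(&st) - 1.0f),
                            coeff * (2.0f * curand_uniform(&st) - 1.0f),
                            coeff * (2.0f * curand_uniform(&st) - 1.0f));

    float3 f = net_force[i];
    pos[i].x += (f.x + fr.x) * dt / gamma;          // overdamped update, as in the kernel above
    pos[i].y += (f.y + fr.y) * dt / gamma;
    pos[i].z += (f.z + fr.z) * dt / gamma;
}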
3a61cb27892c044a0e138e884b05dc0ae3382e6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hip/hip_cooperative_groups.h> extern "C" __global__ void vecAdd(int *l, int *r, int *p, size_t N) { size_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < N) { p[idx] = l[idx] + r[idx]; } cooperative_groups::this_grid().sync(); }
3a61cb27892c044a0e138e884b05dc0ae3382e6f.cu
#include <cooperative_groups.h> extern "C" __global__ void vecAdd(int *l, int *r, int *p, size_t N) { size_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < N) { p[idx] = l[idx] + r[idx]; } cooperative_groups::this_grid().sync(); }
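cooperative_groups::this_grid().sync() in the kernel above is only defined when the kernel is launched as a cooperative kernel and the whole grid can be resident on the device at once. A hedged host-side launch sketch; the wrapper name and sizes are illustrative.

#include <cuda_runtime.h>

extern "C" __global__ void vecAdd(int* l, int* r, int* p, size_t N);   // defined above

cudaError_t launch_cooperative(int* d_l, int* d_r, int* d_p, size_t N)
{
    int block = 256;
    int grid = (int)((N + block - 1) / block);
    void* args[] = { &d_l, &d_r, &d_p, &N };
    // Requires a device that reports cudaDevAttrCooperativeLaunch. Fails with
    // cudaErrorCooperativeLaunchTooLarge if the grid cannot be co-resident; the kernel
    // has no grid-stride loop, so N must be small enough for a fully resident grid.
    return cudaLaunchCooperativeKernel((void*)vecAdd, dim3(grid), dim3(block), args, 0, 0);
}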
63b7e002e7f357acc344a940508235dc92436ac5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/VolumetricAveragePooling.cu" #else static inline void THNN_(VolumetricAveragePooling_shapeCheck)( THCState *state, THCTensor *input, THCTensor *gradOutput, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, bool ceil_mode) { int inputSlices; int inputTime; int inputHeight; int inputWidth; int ndim = input->nDimension; int dimN = 0; int dimt = 1; int dimh = 2; int dimw = 3; if (input->nDimension == 5) { dimN++; dimt++; dimh++; dimw++; } if (THCTensor_(nDimension)(state, input) == 4) { THArgCheck(input->size[dimw] >= kW && input->size[dimh] >= kH && input->size[dimt] >= kT, 2, "input image (T: %d H: %d W: %d) smaller than " "kernel size (kT: %d kH: %d kW: %d)", input->size[dimt], input->size[dimh], input->size[dimw], kT, kH, kW); /* sizes */ inputSlices = THCTensor_(size)(state, input, 0); inputTime = THCTensor_(size)(state, input, 1); inputHeight = THCTensor_(size)(state, input, 2); inputWidth = THCTensor_(size)(state, input, 3); } else if (THCTensor_(nDimension)(state, input) == 5) { THArgCheck(input->size[dimw] >= kW && input->size[dimh] >= kH && input->size[dimt] >= kT, 2, "input image (T: %d H: %d W: %d) smaller than " "kernel size (kT: %d kH: %d kW: %d)", input->size[dimt], input->size[dimh], input->size[dimw], kT, kH, kW); /* sizes */ inputSlices = THCTensor_(size)(state, input, 1); inputTime = THCTensor_(size)(state, input, 2); inputHeight = THCTensor_(size)(state, input, 3); inputWidth = THCTensor_(size)(state, input, 4); } else { THArgError(2, "4D or 5D tensor expected, but got: %d", input->nDimension); } // The second argument is the index of padH. THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 11, "pad should not be greater than half of kernel size, but got " "padT = %d, padW = %d, padH = %d, kT = %d, kW = %d, kH = %d", padT, padW, padH, kT, kW, kH); int outputTime; int outputHeight; int outputWidth; if (ceil_mode) { outputTime = ceil(float(inputTime - kT + 2*padT) / float(dT)) + 1; outputHeight = ceil(float(inputHeight - kH + 2*padH) / float(dH)) + 1; outputWidth = ceil(float(inputWidth - kW + 2*padW) / float(dW)) + 1; } else { outputTime = floor(float(inputTime - kT + 2*padT) / float(dT)) + 1; outputHeight = floor(float(inputHeight - kH + 2*padH) / float(dH)) + 1; outputWidth = floor(float(inputWidth - kW + 2*padW) / float(dW)) + 1; } if (padT || padW || padH) { // ensure that the last pooling starts inside the image // needed to avoid problems in ceil mode if ((outputTime - 1)*dT >= inputTime + padT) --outputTime; if ((outputHeight - 1)*dH >= inputHeight + padH) --outputHeight; if ((outputWidth - 1)*dW >= inputWidth + padW) --outputWidth; } if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, ndim, dimN, inputSlices); THCUNN_check_dim_size(state, gradOutput, ndim, dimt, outputTime); THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight); THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth); } } void THNN_(VolumetricAveragePooling_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, bool ceil_mode, bool count_include_pad) { int batchSize; int inputSlices; int inputTime; int inputHeight; int inputWidth; int dimt = 1; int dimh = 2; int dimw = 3; int fiveDimensionalInput = THCTensor_(nDimension)(state, input) == 5; if (fiveDimensionalInput) { dimt++; dimh++; dimw++; } 
THNN_(VolumetricAveragePooling_shapeCheck) (state, input, NULL, kT, kW, kH, dT, dW, dH, padT, padW, padH, ceil_mode); if (!fiveDimensionalInput) /* 4D */ { /* sizes */ batchSize = 1; inputSlices = THCTensor_(size)(state, input, 0); inputTime = THCTensor_(size)(state, input, 1); inputHeight = THCTensor_(size)(state, input, 2); inputWidth = THCTensor_(size)(state, input, 3); } else /* 5D */ { /* sizes */ batchSize = THCTensor_(size)(state, input, 0); inputSlices = THCTensor_(size)(state, input, 1); inputTime = THCTensor_(size)(state, input, 2); inputHeight = THCTensor_(size)(state, input, 3); inputWidth = THCTensor_(size)(state, input, 4); } int outputTime; int outputHeight; int outputWidth; if (ceil_mode) { outputTime = ceil(float(inputTime - kT + 2*padT) / float(dT)) + 1; outputHeight = ceil(float(inputHeight - kH + 2*padH) / float(dH)) + 1; outputWidth = ceil(float(inputWidth - kW + 2*padW) / float(dW)) + 1; } else { outputTime = floor(float(inputTime - kT + 2*padT) / float(dT)) + 1; outputHeight = floor(float(inputHeight - kH + 2*padH) / float(dH)) + 1; outputWidth = floor(float(inputWidth - kW + 2*padW) / float(dW)) + 1; } if (padT || padH || padW) { // ensure that the last pooling starts inside the image // needed to avoid problems in ceil mode if ((outputTime - 1)*dT >= inputTime + padT) --outputTime; if ((outputHeight - 1)*dH >= inputHeight + padH) --outputHeight; if ((outputWidth - 1)*dW >= inputWidth + padW) --outputWidth; } if (!fiveDimensionalInput) /* 4D */ { /* resize output */ THCTensor_(resize4d)(state, output, inputSlices, outputTime, outputHeight, outputWidth); } else /* 5D */ { THCTensor_(resize5d)(state, output, batchSize, inputSlices, outputTime, outputHeight, outputWidth); } input = THCTensor_(newContiguous)(state, input); if (fiveDimensionalInput) { // Collapse batch and feature dimensions output = THCTensor_(newFoldBatchDim)(state, output); THCTensor *old_input = input; input = THCTensor_(newFoldBatchDim)(state, input); THCTensor_(free)(state, old_input); } else { THCTensor_(retain)(state, output); } THCDeviceTensor<real, 4> cudaInput; THCDeviceTensor<real, 4> cudaOutput; cudaInput = toDeviceTensor<real, 4>(state, input); cudaOutput = toDeviceTensor<real, 4>(state, output); int totalZ = outputTime * inputSlices * batchSize; int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)), THCCeilDiv(outputHeight, static_cast<int>(block.y)), totalZ > 65535 ? 
65535 : totalZ); switch (kW) { LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(1); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(2); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(3); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(4); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(5); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(6); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(7); default: hipLaunchKernelGGL(( cuda_VolumetricAveragePooling_updateOutput<real, accreal>) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), cudaInput, cudaOutput, kT, kH, kW, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ); break; } totalZ -= 65535; offsetZ += 65535; THCudaCheck(hipGetLastError()); } THCTensor_(free)(state, input); THCTensor_(free)(state, output); } void THNN_(VolumetricAveragePooling_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, bool ceil_mode, bool count_include_pad) { THNN_(VolumetricAveragePooling_shapeCheck) (state, input, gradOutput, kT, kW, kH, dT, dW, dH, padT, padW, padH, ceil_mode); bool kernelsOverlap = (dT < kT) || (dH < kH) || (dW < kW); // Resize and initialize result tensor. THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); int batchSize; int inputSlices; int inputTime; int inputHeight; int inputWidth; int outputTime; int outputHeight; int outputWidth; int fiveDimensionalInput = THCTensor_(nDimension)(state, input) == 5; if (!fiveDimensionalInput) /* 4D */ { batchSize = 1; inputSlices = THCTensor_(size)(state, input, 0); inputTime = THCTensor_(size)(state, input, 1); inputHeight = THCTensor_(size)(state, input, 2); inputWidth = THCTensor_(size)(state, input, 3); outputTime = THCTensor_(size)(state, gradOutput, 1); outputHeight = THCTensor_(size)(state, gradOutput, 2); outputWidth = THCTensor_(size)(state, gradOutput, 3); } else { batchSize = THCTensor_(size)(state, input, 0); inputSlices = THCTensor_(size)(state, input, 1); inputTime = THCTensor_(size)(state, input, 2); inputHeight = THCTensor_(size)(state, input, 3); inputWidth = THCTensor_(size)(state, input, 4); outputTime = THCTensor_(size)(state, gradOutput, 2); outputHeight = THCTensor_(size)(state, gradOutput, 3); outputWidth = THCTensor_(size)(state, gradOutput, 4); } gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (fiveDimensionalInput) { // Collapse batch and feature dimensions gradInput = THCTensor_(newFoldBatchDim)(state, gradInput); THCTensor *old_gradOutput = gradOutput; gradOutput = THCTensor_(newFoldBatchDim)(state, gradOutput); THCTensor_(free)(state, old_gradOutput); } else { THCTensor_(retain)(state, gradInput); } THCDeviceTensor<real, 4> cudaGradInput; THCDeviceTensor<real, 4> cudaGradOutput; cudaGradInput = toDeviceTensor<real, 4>(state, gradInput); cudaGradOutput = toDeviceTensor<real, 4>(state, gradOutput); dim3 block(32, 8); // Optimizing for stride 1 is probably only of limited value, but this // specialization yields 3x speedup over the atomicAdd implementation. // Padding must be 0, otherwise, pool size may change. if (dT == 1 && dH == 1 && dW == 1 && padT == 0 && padH == 0 && padW == 0) { int totalZ = inputTime * inputSlices * batchSize; int offsetZ = 0; while (totalZ > 0) { dim3 grid(THCCeilDiv(inputWidth, static_cast<int>(block.x)), THCCeilDiv(inputHeight, static_cast<int>(block.y)), totalZ > 65535 ? 
65535 : totalZ); hipLaunchKernelGGL(( cuda_VolumetricAveragePooling_updateGradInput_Stride1<real, accreal>) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), cudaGradOutput, cudaGradInput, kT, kH, kW, 1.0f/(kT * kH * kW), offsetZ); THCudaCheck(hipGetLastError()); totalZ -= 65535; offsetZ += 65535; } } else { int totalZ = outputTime * inputSlices * batchSize; int offsetZ = 0; while (totalZ > 0) { dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)), THCCeilDiv(outputHeight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); if (kernelsOverlap) { hipLaunchKernelGGL(( cuda_VolumetricAveragePooling_updateGradInput_atomicAdd<real, accreal>) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ); } else { hipLaunchKernelGGL(( cuda_VolumetricAveragePooling_updateGradInput<real, accreal>) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ); } THCudaCheck(hipGetLastError()); totalZ -= 65535; offsetZ += 65535; } } THCTensor_(free)(state, gradInput); THCTensor_(free)(state, gradOutput); } #endif
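The shape check above derives each pooled extent as floor((in - k + 2*pad) / stride) + 1, switching to ceil when ceil_mode is set, and then clamps so the last pooling window still starts inside the padded input. A minimal host-side sketch of that rule follows; it assumes nothing from THCUNN, and the helper name pooledExtent and the sample sizes are illustrative only.

// pooled_extent.cpp -- standalone illustration of the output-size rule used above.
#include <cmath>
#include <cstdio>

// Pooled output length along one dimension: `in` input length, `k` kernel size,
// `d` stride, `pad` padding. Mirrors the ceil_mode switch and the per-dimension
// clamp that keeps the last pooling window starting inside the (padded) input.
static int pooledExtent(int in, int k, int d, int pad, bool ceil_mode) {
  float raw = float(in - k + 2 * pad) / float(d);
  int out = (ceil_mode ? (int)std::ceil(raw) : (int)std::floor(raw)) + 1;
  if (pad > 0 && (out - 1) * d >= in + pad) --out;
  return out;
}

int main() {
  // With in=6, k=3, d=2, pad=0 the two modes differ: floor gives 2, ceil gives 3.
  std::printf("floor mode: %d\n", pooledExtent(6, 3, 2, 0, false));
  std::printf("ceil  mode: %d\n", pooledExtent(6, 3, 2, 0, true));
  return 0;
}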
63b7e002e7f357acc344a940508235dc92436ac5.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/VolumetricAveragePooling.cu" #else static inline void THNN_(VolumetricAveragePooling_shapeCheck)( THCState *state, THCTensor *input, THCTensor *gradOutput, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, bool ceil_mode) { int inputSlices; int inputTime; int inputHeight; int inputWidth; int ndim = input->nDimension; int dimN = 0; int dimt = 1; int dimh = 2; int dimw = 3; if (input->nDimension == 5) { dimN++; dimt++; dimh++; dimw++; } if (THCTensor_(nDimension)(state, input) == 4) { THArgCheck(input->size[dimw] >= kW && input->size[dimh] >= kH && input->size[dimt] >= kT, 2, "input image (T: %d H: %d W: %d) smaller than " "kernel size (kT: %d kH: %d kW: %d)", input->size[dimt], input->size[dimh], input->size[dimw], kT, kH, kW); /* sizes */ inputSlices = THCTensor_(size)(state, input, 0); inputTime = THCTensor_(size)(state, input, 1); inputHeight = THCTensor_(size)(state, input, 2); inputWidth = THCTensor_(size)(state, input, 3); } else if (THCTensor_(nDimension)(state, input) == 5) { THArgCheck(input->size[dimw] >= kW && input->size[dimh] >= kH && input->size[dimt] >= kT, 2, "input image (T: %d H: %d W: %d) smaller than " "kernel size (kT: %d kH: %d kW: %d)", input->size[dimt], input->size[dimh], input->size[dimw], kT, kH, kW); /* sizes */ inputSlices = THCTensor_(size)(state, input, 1); inputTime = THCTensor_(size)(state, input, 2); inputHeight = THCTensor_(size)(state, input, 3); inputWidth = THCTensor_(size)(state, input, 4); } else { THArgError(2, "4D or 5D tensor expected, but got: %d", input->nDimension); } // The second argument is the index of padH. THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 11, "pad should not be greater than half of kernel size, but got " "padT = %d, padW = %d, padH = %d, kT = %d, kW = %d, kH = %d", padT, padW, padH, kT, kW, kH); int outputTime; int outputHeight; int outputWidth; if (ceil_mode) { outputTime = ceil(float(inputTime - kT + 2*padT) / float(dT)) + 1; outputHeight = ceil(float(inputHeight - kH + 2*padH) / float(dH)) + 1; outputWidth = ceil(float(inputWidth - kW + 2*padW) / float(dW)) + 1; } else { outputTime = floor(float(inputTime - kT + 2*padT) / float(dT)) + 1; outputHeight = floor(float(inputHeight - kH + 2*padH) / float(dH)) + 1; outputWidth = floor(float(inputWidth - kW + 2*padW) / float(dW)) + 1; } if (padT || padW || padH) { // ensure that the last pooling starts inside the image // needed to avoid problems in ceil mode if ((outputTime - 1)*dT >= inputTime + padT) --outputTime; if ((outputHeight - 1)*dH >= inputHeight + padH) --outputHeight; if ((outputWidth - 1)*dW >= inputWidth + padW) --outputWidth; } if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, ndim, dimN, inputSlices); THCUNN_check_dim_size(state, gradOutput, ndim, dimt, outputTime); THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight); THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth); } } void THNN_(VolumetricAveragePooling_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, bool ceil_mode, bool count_include_pad) { int batchSize; int inputSlices; int inputTime; int inputHeight; int inputWidth; int dimt = 1; int dimh = 2; int dimw = 3; int fiveDimensionalInput = THCTensor_(nDimension)(state, input) == 5; if (fiveDimensionalInput) { dimt++; dimh++; dimw++; } THNN_(VolumetricAveragePooling_shapeCheck) (state, input, NULL, kT, kW, kH, dT, dW, dH, 
padT, padW, padH, ceil_mode); if (!fiveDimensionalInput) /* 4D */ { /* sizes */ batchSize = 1; inputSlices = THCTensor_(size)(state, input, 0); inputTime = THCTensor_(size)(state, input, 1); inputHeight = THCTensor_(size)(state, input, 2); inputWidth = THCTensor_(size)(state, input, 3); } else /* 5D */ { /* sizes */ batchSize = THCTensor_(size)(state, input, 0); inputSlices = THCTensor_(size)(state, input, 1); inputTime = THCTensor_(size)(state, input, 2); inputHeight = THCTensor_(size)(state, input, 3); inputWidth = THCTensor_(size)(state, input, 4); } int outputTime; int outputHeight; int outputWidth; if (ceil_mode) { outputTime = ceil(float(inputTime - kT + 2*padT) / float(dT)) + 1; outputHeight = ceil(float(inputHeight - kH + 2*padH) / float(dH)) + 1; outputWidth = ceil(float(inputWidth - kW + 2*padW) / float(dW)) + 1; } else { outputTime = floor(float(inputTime - kT + 2*padT) / float(dT)) + 1; outputHeight = floor(float(inputHeight - kH + 2*padH) / float(dH)) + 1; outputWidth = floor(float(inputWidth - kW + 2*padW) / float(dW)) + 1; } if (padT || padH || padW) { // ensure that the last pooling starts inside the image // needed to avoid problems in ceil mode if ((outputTime - 1)*dT >= inputTime + padT) --outputTime; if ((outputHeight - 1)*dH >= inputHeight + padH) --outputHeight; if ((outputWidth - 1)*dW >= inputWidth + padW) --outputWidth; } if (!fiveDimensionalInput) /* 4D */ { /* resize output */ THCTensor_(resize4d)(state, output, inputSlices, outputTime, outputHeight, outputWidth); } else /* 5D */ { THCTensor_(resize5d)(state, output, batchSize, inputSlices, outputTime, outputHeight, outputWidth); } input = THCTensor_(newContiguous)(state, input); if (fiveDimensionalInput) { // Collapse batch and feature dimensions output = THCTensor_(newFoldBatchDim)(state, output); THCTensor *old_input = input; input = THCTensor_(newFoldBatchDim)(state, input); THCTensor_(free)(state, old_input); } else { THCTensor_(retain)(state, output); } THCDeviceTensor<real, 4> cudaInput; THCDeviceTensor<real, 4> cudaOutput; cudaInput = toDeviceTensor<real, 4>(state, input); cudaOutput = toDeviceTensor<real, 4>(state, output); int totalZ = outputTime * inputSlices * batchSize; int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)), THCCeilDiv(outputHeight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); switch (kW) { LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(1); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(2); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(3); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(4); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(5); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(6); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(7); default: cuda_VolumetricAveragePooling_updateOutput<real, accreal> <<<grid, block, 0, THCState_getCurrentStream(state)>>>( cudaInput, cudaOutput, kT, kH, kW, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ); break; } totalZ -= 65535; offsetZ += 65535; THCudaCheck(cudaGetLastError()); } THCTensor_(free)(state, input); THCTensor_(free)(state, output); } void THNN_(VolumetricAveragePooling_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, bool ceil_mode, bool count_include_pad) { THNN_(VolumetricAveragePooling_shapeCheck) (state, input, gradOutput, kT, kW, kH, dT, dW, dH, padT, padW, padH, ceil_mode); bool kernelsOverlap = (dT < kT) || (dH < kH) || (dW < kW); // Resize and initialize result tensor. 
THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); int batchSize; int inputSlices; int inputTime; int inputHeight; int inputWidth; int outputTime; int outputHeight; int outputWidth; int fiveDimensionalInput = THCTensor_(nDimension)(state, input) == 5; if (!fiveDimensionalInput) /* 4D */ { batchSize = 1; inputSlices = THCTensor_(size)(state, input, 0); inputTime = THCTensor_(size)(state, input, 1); inputHeight = THCTensor_(size)(state, input, 2); inputWidth = THCTensor_(size)(state, input, 3); outputTime = THCTensor_(size)(state, gradOutput, 1); outputHeight = THCTensor_(size)(state, gradOutput, 2); outputWidth = THCTensor_(size)(state, gradOutput, 3); } else { batchSize = THCTensor_(size)(state, input, 0); inputSlices = THCTensor_(size)(state, input, 1); inputTime = THCTensor_(size)(state, input, 2); inputHeight = THCTensor_(size)(state, input, 3); inputWidth = THCTensor_(size)(state, input, 4); outputTime = THCTensor_(size)(state, gradOutput, 2); outputHeight = THCTensor_(size)(state, gradOutput, 3); outputWidth = THCTensor_(size)(state, gradOutput, 4); } gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (fiveDimensionalInput) { // Collapse batch and feature dimensions gradInput = THCTensor_(newFoldBatchDim)(state, gradInput); THCTensor *old_gradOutput = gradOutput; gradOutput = THCTensor_(newFoldBatchDim)(state, gradOutput); THCTensor_(free)(state, old_gradOutput); } else { THCTensor_(retain)(state, gradInput); } THCDeviceTensor<real, 4> cudaGradInput; THCDeviceTensor<real, 4> cudaGradOutput; cudaGradInput = toDeviceTensor<real, 4>(state, gradInput); cudaGradOutput = toDeviceTensor<real, 4>(state, gradOutput); dim3 block(32, 8); // Optimizing for stride 1 is probably only of limited value, but this // specialization yields 3x speedup over the atomicAdd implementation. // Padding must be 0, otherwise, pool size may change. if (dT == 1 && dH == 1 && dW == 1 && padT == 0 && padH == 0 && padW == 0) { int totalZ = inputTime * inputSlices * batchSize; int offsetZ = 0; while (totalZ > 0) { dim3 grid(THCCeilDiv(inputWidth, static_cast<int>(block.x)), THCCeilDiv(inputHeight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); cuda_VolumetricAveragePooling_updateGradInput_Stride1<real, accreal> <<<grid, block, 0, THCState_getCurrentStream(state)>>>( cudaGradOutput, cudaGradInput, kT, kH, kW, 1.0f/(kT * kH * kW), offsetZ); THCudaCheck(cudaGetLastError()); totalZ -= 65535; offsetZ += 65535; } } else { int totalZ = outputTime * inputSlices * batchSize; int offsetZ = 0; while (totalZ > 0) { dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)), THCCeilDiv(outputHeight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); if (kernelsOverlap) { cuda_VolumetricAveragePooling_updateGradInput_atomicAdd<real, accreal> <<<grid, block, 0, THCState_getCurrentStream(state)>>>( cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ); } else { cuda_VolumetricAveragePooling_updateGradInput<real, accreal> <<<grid, block, 0, THCState_getCurrentStream(state)>>>( cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ); } THCudaCheck(cudaGetLastError()); totalZ -= 65535; offsetZ += 65535; } } THCTensor_(free)(state, gradInput); THCTensor_(free)(state, gradOutput); } #endif
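Every launch above caps grid.z at 65535 and advances offsetZ in a loop, because a CUDA grid's z extent cannot exceed 65535. A self-contained sketch of that chunking pattern follows; the touch kernel and the buffer sizes are stand-ins of my own, not the pooling kernels themselves.

// grid_z_chunking.cu -- the grid.z chunking pattern used by the launches above.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void touch(float *data, int width, int height, int slices, int offsetZ) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int z = blockIdx.z + offsetZ;  // absolute slice index across all chunks
  if (x < width && y < height && z < slices)
    data[((size_t)z * height + y) * width + x] += 1.0f;
}

int main() {
  const int width = 8, height = 8, slices = 70000;  // deliberately > 65535
  float *d = nullptr;
  cudaMalloc(&d, (size_t)width * height * slices * sizeof(float));
  cudaMemset(d, 0, (size_t)width * height * slices * sizeof(float));

  dim3 block(32, 8);
  int totalZ = slices, offsetZ = 0;
  while (totalZ > 0) {
    // grid.z is limited to 65535, so large z extents are processed in chunks.
    dim3 grid((width + block.x - 1) / block.x,
              (height + block.y - 1) / block.y,
              totalZ > 65535 ? 65535 : totalZ);
    touch<<<grid, block>>>(d, width, height, slices, offsetZ);
    totalZ -= 65535;
    offsetZ += 65535;
  }
  cudaDeviceSynchronize();
  cudaFree(d);
  return 0;
}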
9c44ba1e886297a634ede2f90330c4421493af91.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "myFunctions.h"

// Counts the characters in a NUL-terminated group string.
__device__ int numOfChars(char* group) {
  int count = 0;
  while (*(group + count) != '\0') {
    count++;
  }
  return count;
}

__device__ int isEqual(char x, char y) {
  if (x == y) {
    return 1;
  }
  return 0;
}

// Returns 1 when both characters belong to the same conservative group.
__device__ int isConservative(char x, char y) {
  char *conserGroups[9] = { "NDEQ", "NEQK", "STA", "MILV", "QHRK", "NHQK", "FYW", "HY", "MILF" };
  int count, groupSize = 0;
  for (int i = 0; i < 9; i++) {
    count = 0;
    groupSize = numOfChars(*(conserGroups + i));
    for (int j = 0; j < groupSize; j++) {
      if (isEqual(x, *(*(conserGroups + i) + j))) {
        count++;
      } else if (isEqual(y, *(*(conserGroups + i) + j))) {
        count++;
      }
    }
    if (count == 2) {
      return 1;
    }
  }
  return 0;
}

// Returns 1 when both characters belong to the same semi-conservative group.
__device__ int isSemiConservative(char x, char y) {
  char *semiConserGroups[11] = { "SAG", "ATV", "CSA", "SGND", "STPA", "STNK", "NEQHRK", "NDEQHK", "SNDEQK", "HFY", "FVLIM" };
  int count, groupSize = 0;
  for (int i = 0; i < 11; i++) {
    count = 0;
    groupSize = numOfChars(*(semiConserGroups + i));
    for (int j = 0; j < groupSize; j++) {
      if (isEqual(x, *(*(semiConserGroups + i) + j))) {
        count++;
      } else if (isEqual(y, *(*(semiConserGroups + i) + j))) {
        count++;
      }
    }
    if (count == 2) {
      return 1;
    }
  }
  return 0;
}

// Combines the four counters {*, :, ., " "} into a single alignment score.
__device__ double calcResult(double* results, double* weights) {
  double star  = *(results);
  double colon = *(results + 1);
  double dot   = *(results + 2);
  double space = *(results + 3);
  double score = star * weights[0] - colon * weights[1] - dot * weights[2] - space * weights[3];
  return score;
}

// Each thread scores the mutant against the prime sequence at one offset.
__global__ void comparisonMutantPrime_gpu(char* primeSeq, char* mutant, int lenMutant, int offset,
                                          double* weights, double* offSetScores, int my_rank) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x; // identify the current offset
  if (tid < offset) { // one score per possible offset
    char primeSeqChar;
    char mutantChar;
    double results[4] = {0, 0, 0, 0};
    /* Sum each one of the equation variables {*, :, ., " "} */
    for (int j = 0; j < lenMutant; j++) {
      primeSeqChar = *(primeSeq + tid + j);
      mutantChar = *(mutant + j);
      if (isEqual(primeSeqChar, mutantChar)) {                   // *
        results[0]++;
      } else if (isConservative(primeSeqChar, mutantChar)) {     // :
        results[1]++;
      } else if (isSemiConservative(primeSeqChar, mutantChar)) { // .
        results[2]++;
      } else {                                                   // " " (space)
        results[3]++;
      }
    }
    double score = calcResult(results, weights);
    offSetScores[tid] = score;
  }
}

double ComputeOnGPU(char* primeSeq, char* mutant, double* weights, int possibleOffset, int tid, int my_rank, int* p_offset) {
  int lenPrimeSequence = strlen(primeSeq);
  int lenMutant = strlen(mutant);

  // Allocating memory for the per-offset scores
  double *offsetScore = (double*)malloc(sizeof(double) * possibleOffset);
  double *d_offsetScore;
  hipMalloc((void**) &d_offsetScore, sizeof(double) * possibleOffset);

  // Allocating memory and transferring data from CPU to GPU
  char *d_primeSeq;
  hipMalloc((void**) &d_primeSeq, sizeof(char) * (lenPrimeSequence + 1));
  hipMemcpy(d_primeSeq, primeSeq, sizeof(char) * (lenPrimeSequence + 1), hipMemcpyHostToDevice);

  // Allocating memory and transferring data from CPU to GPU
  char* d_mutant;
  hipMalloc((void**) &d_mutant, sizeof(char) * (lenMutant + 1));
  hipMemcpy(d_mutant, mutant, sizeof(char) * (lenMutant + 1), hipMemcpyHostToDevice);

  // Allocating memory and transferring data from CPU to GPU
  double *d_weights;
  hipMalloc((void**) &d_weights, sizeof(double) * EQUATION_ELEMENTS);
  hipMemcpy(d_weights, weights, sizeof(double) * EQUATION_ELEMENTS, hipMemcpyHostToDevice);

  // Calc blocks and threads: one thread per possible offset
  int block_size = 256;
  int grid_size = (possibleOffset + block_size - 1) / block_size;
  hipLaunchKernelGGL(comparisonMutantPrime_gpu, dim3(grid_size), dim3(block_size), 0, 0, d_primeSeq, d_mutant, lenMutant, possibleOffset, d_weights, d_offsetScore, my_rank);

  // Transfer the scores from GPU to CPU
  hipMemcpy(offsetScore, d_offsetScore, sizeof(double) * possibleOffset, hipMemcpyDeviceToHost);

  double bestScore = offsetScore[0];
  //p_offset = 0;
  for (int i = 1; i < possibleOffset; i++) {
    if (bestScore < offsetScore[i]) {
      bestScore = offsetScore[i];
      //p_offset = i;
    }
  }

  hipFree(d_primeSeq);
  hipFree(d_mutant);
  hipFree(d_weights);
  free(offsetScore);
  hipFree(d_offsetScore);

  return bestScore;
}
9c44ba1e886297a634ede2f90330c4421493af91.cu
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "myFunctions.h"

// Counts the characters in a NUL-terminated group string.
__device__ int numOfChars(char* group) {
  int count = 0;
  while (*(group + count) != '\0') {
    count++;
  }
  return count;
}

__device__ int isEqual(char x, char y) {
  if (x == y) {
    return 1;
  }
  return 0;
}

// Returns 1 when both characters belong to the same conservative group.
__device__ int isConservative(char x, char y) {
  char *conserGroups[9] = { "NDEQ", "NEQK", "STA", "MILV", "QHRK", "NHQK", "FYW", "HY", "MILF" };
  int count, groupSize = 0;
  for (int i = 0; i < 9; i++) {
    count = 0;
    groupSize = numOfChars(*(conserGroups + i));
    for (int j = 0; j < groupSize; j++) {
      if (isEqual(x, *(*(conserGroups + i) + j))) {
        count++;
      } else if (isEqual(y, *(*(conserGroups + i) + j))) {
        count++;
      }
    }
    if (count == 2) {
      return 1;
    }
  }
  return 0;
}

// Returns 1 when both characters belong to the same semi-conservative group.
__device__ int isSemiConservative(char x, char y) {
  char *semiConserGroups[11] = { "SAG", "ATV", "CSA", "SGND", "STPA", "STNK", "NEQHRK", "NDEQHK", "SNDEQK", "HFY", "FVLIM" };
  int count, groupSize = 0;
  for (int i = 0; i < 11; i++) {
    count = 0;
    groupSize = numOfChars(*(semiConserGroups + i));
    for (int j = 0; j < groupSize; j++) {
      if (isEqual(x, *(*(semiConserGroups + i) + j))) {
        count++;
      } else if (isEqual(y, *(*(semiConserGroups + i) + j))) {
        count++;
      }
    }
    if (count == 2) {
      return 1;
    }
  }
  return 0;
}

// Combines the four counters {*, :, ., " "} into a single alignment score.
__device__ double calcResult(double* results, double* weights) {
  double star  = *(results);
  double colon = *(results + 1);
  double dot   = *(results + 2);
  double space = *(results + 3);
  double score = star * weights[0] - colon * weights[1] - dot * weights[2] - space * weights[3];
  return score;
}

// Each thread scores the mutant against the prime sequence at one offset.
__global__ void comparisonMutantPrime_gpu(char* primeSeq, char* mutant, int lenMutant, int offset,
                                          double* weights, double* offSetScores, int my_rank) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x; // identify the current offset
  if (tid < offset) { // one score per possible offset
    char primeSeqChar;
    char mutantChar;
    double results[4] = {0, 0, 0, 0};
    /* Sum each one of the equation variables {*, :, ., " "} */
    for (int j = 0; j < lenMutant; j++) {
      primeSeqChar = *(primeSeq + tid + j);
      mutantChar = *(mutant + j);
      if (isEqual(primeSeqChar, mutantChar)) {                   // *
        results[0]++;
      } else if (isConservative(primeSeqChar, mutantChar)) {     // :
        results[1]++;
      } else if (isSemiConservative(primeSeqChar, mutantChar)) { // .
        results[2]++;
      } else {                                                   // " " (space)
        results[3]++;
      }
    }
    double score = calcResult(results, weights);
    offSetScores[tid] = score;
  }
}

double ComputeOnGPU(char* primeSeq, char* mutant, double* weights, int possibleOffset, int tid, int my_rank, int* p_offset) {
  int lenPrimeSequence = strlen(primeSeq);
  int lenMutant = strlen(mutant);

  // Allocating memory for the per-offset scores
  double *offsetScore = (double*)malloc(sizeof(double) * possibleOffset);
  double *d_offsetScore;
  cudaMalloc((void**) &d_offsetScore, sizeof(double) * possibleOffset);

  // Allocating memory and transferring data from CPU to GPU
  char *d_primeSeq;
  cudaMalloc((void**) &d_primeSeq, sizeof(char) * (lenPrimeSequence + 1));
  cudaMemcpy(d_primeSeq, primeSeq, sizeof(char) * (lenPrimeSequence + 1), cudaMemcpyHostToDevice);

  // Allocating memory and transferring data from CPU to GPU
  char* d_mutant;
  cudaMalloc((void**) &d_mutant, sizeof(char) * (lenMutant + 1));
  cudaMemcpy(d_mutant, mutant, sizeof(char) * (lenMutant + 1), cudaMemcpyHostToDevice);

  // Allocating memory and transferring data from CPU to GPU
  double *d_weights;
  cudaMalloc((void**) &d_weights, sizeof(double) * EQUATION_ELEMENTS);
  cudaMemcpy(d_weights, weights, sizeof(double) * EQUATION_ELEMENTS, cudaMemcpyHostToDevice);

  // Calc blocks and threads: one thread per possible offset
  int block_size = 256;
  int grid_size = (possibleOffset + block_size - 1) / block_size;
  comparisonMutantPrime_gpu<<<grid_size, block_size>>>(d_primeSeq, d_mutant, lenMutant, possibleOffset, d_weights, d_offsetScore, my_rank);

  // Transfer the scores from GPU to CPU
  cudaMemcpy(offsetScore, d_offsetScore, sizeof(double) * possibleOffset, cudaMemcpyDeviceToHost);

  double bestScore = offsetScore[0];
  //p_offset = 0;
  for (int i = 1; i < possibleOffset; i++) {
    if (bestScore < offsetScore[i]) {
      bestScore = offsetScore[i];
      //p_offset = i;
    }
  }

  cudaFree(d_primeSeq);
  cudaFree(d_mutant);
  cudaFree(d_weights);
  free(offsetScore);
  cudaFree(d_offsetScore);

  return bestScore;
}
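ComputeOnGPU copies the whole per-offset score array back to the host and scans it there for the maximum. The same reduction can also stay on the device; below is a short self-contained sketch using thrust::max_element, where the hard-coded scores merely stand in for d_offsetScore and are not taken from the program above.

// best_offset_thrust.cu -- device-side max reduction over per-offset scores.
#include <cstdio>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>

int main() {
  // Stand-ins for the scores the comparison kernel would have written.
  const std::vector<double> h_scores{3.5, -1.0, 7.25, 0.5, 7.0};
  thrust::device_vector<double> d_scores(h_scores.begin(), h_scores.end());

  // max_element runs on the GPU; only the winning element is read on the host.
  auto best = thrust::max_element(d_scores.begin(), d_scores.end());
  int bestOffset = static_cast<int>(best - d_scores.begin());
  double bestScore = *best;  // copies a single double back to the host

  std::printf("best offset = %d, score = %.2f\n", bestOffset, bestScore);
  return 0;
}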
8fa5652c0396b7fa749e46a8a174c8ec5d2919a0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <primitiv/config.h>

#include <primitiv/devices/cuda/device.h>
#include <primitiv/internal/cuda/utils.h>
#include <primitiv/devices/cuda/ops/common.h>

namespace {

__global__ void set_const_dev(float k, std::uint32_t size, float *py) {
  const std::uint32_t i = IDX;
  if (i < size) py[i] = k;
}

}  // namespace

namespace primitiv {
namespace devices {

void CUDA::reset_tensor_impl(float k, Tensor &x) {
  const std::uint32_t size = x.shape().size();
  const std::uint32_t num_blocks = GRID_SIZE(size, dim1_x_);
  CUDA_CALL(::hipSetDevice(dev_id_));
  hipLaunchKernelGGL(::set_const_dev, dim3(num_blocks), dim3(dim1_x_), 0, 0, k, size, MDATA(x));
}

}  // namespace devices
}  // namespace primitiv
8fa5652c0396b7fa749e46a8a174c8ec5d2919a0.cu
#include <primitiv/config.h>

#include <primitiv/devices/cuda/device.h>
#include <primitiv/internal/cuda/utils.h>
#include <primitiv/devices/cuda/ops/common.h>

namespace {

__global__ void set_const_dev(float k, std::uint32_t size, float *py) {
  const std::uint32_t i = IDX;
  if (i < size) py[i] = k;
}

}  // namespace

namespace primitiv {
namespace devices {

void CUDA::reset_tensor_impl(float k, Tensor &x) {
  const std::uint32_t size = x.shape().size();
  const std::uint32_t num_blocks = GRID_SIZE(size, dim1_x_);
  CUDA_CALL(::cudaSetDevice(dev_id_));
  ::set_const_dev<<<num_blocks, dim1_x_>>>(k, size, MDATA(x));
}

}  // namespace devices
}  // namespace primitiv
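reset_tensor_impl above launches one thread per element, with GRID_SIZE choosing the block count. An alternative is a grid-stride loop with a fixed launch shape; the sketch below is a standalone illustration under that assumption and is not primitiv's code (it deliberately avoids the primitiv-internal IDX, GRID_SIZE and MDATA macros).

// fill_const.cu -- constant fill with a grid-stride loop instead of one thread per element.
#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void fill_const(float k, std::uint32_t size, float *py) {
  for (std::uint32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
       i += gridDim.x * blockDim.x) {
    py[i] = k;  // every thread strides over the whole buffer
  }
}

int main() {
  const std::uint32_t size = 1u << 20;
  float *d = nullptr;
  cudaMalloc(&d, size * sizeof(float));

  const int block = 256;
  const int grid = 128;  // fixed; the stride loop covers any remaining elements
  fill_const<<<grid, block>>>(42.0f, size, d);
  cudaDeviceSynchronize();

  float first = 0.0f;
  cudaMemcpy(&first, d, sizeof(float), cudaMemcpyDeviceToHost);
  std::printf("first element = %f\n", first);
  cudaFree(d);
  return 0;
}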