Dataset columns (string lengths, min to max):
  hip_filename   — string, 5 to 84
  hip_content    — string, 79 to 9.69M
  cuda_filename  — string, 4 to 83
  cuda_content   — string, 19 to 9.69M
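Each row pairs a hipify-generated .hip translation (hip_filename / hip_content) with its original CUDA source (cuda_filename / cuda_content). Below is a minimal sketch of loading and inspecting such pairs with the Hugging Face datasets library; the dataset ID is a placeholder, and the only assumption is that rows follow the four-column schema above.

    from datasets import load_dataset

    # "user/hip-cuda-pairs" is a placeholder ID, not the real dataset name.
    ds = load_dataset("user/hip-cuda-pairs", split="train")

    for row in ds.select(range(3)):
        print(row["cuda_filename"], "->", row["hip_filename"])
        print(len(row["cuda_content"]), "CUDA chars,", len(row["hip_content"]), "HIP chars")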
e81bd0808ba08ffdb760245d98d606f1949d2483.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/linalg/rsvd.cuh> #include <raft/random/rng.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <algorithm> namespace raft { namespace linalg { template <typename T> struct RsvdInputs { T tolerance; int n_row; int n_col; float redundancy; T PC_perc; T UpS_perc; int k; int p; bool use_bbt; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const RsvdInputs<T>& dims) { return os; } template <typename T> class RsvdTest : public ::testing::TestWithParam<RsvdInputs<T>> { protected: RsvdTest() : A(0, stream), U(0, stream), S(0, stream), V(0, stream), left_eig_vectors_ref(0, stream), right_eig_vectors_ref(0, stream), sing_vals_ref(0, stream) { } void SetUp() override { raft::resources handle; stream = resource::get_cuda_stream(handle); params = ::testing::TestWithParam<RsvdInputs<T>>::GetParam(); // rSVD seems to be very sensitive to the random number sequence as well! 
raft::random::RngState r(params.seed, raft::random::GenPC); int m = params.n_row, n = params.n_col; T eig_svd_tol = 1.e-7; int max_sweeps = 100; T mu = 0.0, sigma = 1.0; A.resize(m * n, stream); if (params.tolerance > 1) { // Sanity check ASSERT(m == 3, "This test only supports mxn=3x2!"); ASSERT(m * n == 6, "This test only supports mxn=3x2!"); T data_h[] = {1.0, 4.0, 2.0, 2.0, 5.0, 1.0}; raft::update_device(A.data(), data_h, m * n, stream); T left_eig_vectors_ref_h[] = {-0.308219, -0.906133, -0.289695}; T right_eig_vectors_ref_h[] = {-0.638636, -0.769509}; T sing_vals_ref_h[] = {7.065283}; left_eig_vectors_ref.resize(m, stream); right_eig_vectors_ref.resize(n, stream); sing_vals_ref.resize(1, stream); raft::update_device(left_eig_vectors_ref.data(), left_eig_vectors_ref_h, m * 1, stream); raft::update_device(right_eig_vectors_ref.data(), right_eig_vectors_ref_h, n * 1, stream); raft::update_device(sing_vals_ref.data(), sing_vals_ref_h, 1, stream); } else { // Other normal tests int n_informative = int(0.25f * n); // Informative cols int len_informative = m * n_informative; int n_redundant = n - n_informative; // Redundant cols int len_redundant = m * n_redundant; normal(handle, r, A.data(), len_informative, mu, sigma); RAFT_CUDA_TRY(hipMemcpyAsync(A.data() + len_informative, A.data(), len_redundant * sizeof(T), hipMemcpyDeviceToDevice, stream)); } std::vector<T> A_backup_cpu(m * n); // Backup A matrix as svdJacobi will destroy the content of A raft::update_host(A_backup_cpu.data(), A.data(), m * n, stream); if (params.k == 0) { params.k = ::max((int)(::min(m, n) * params.PC_perc), 1); params.p = ::max((int)(::min(m, n) * params.UpS_perc), 1); } U.resize(m * params.k, stream); S.resize(params.k, stream); V.resize(n * params.k, stream); RAFT_CUDA_TRY(hipMemsetAsync(U.data(), 0, U.size() * sizeof(T), stream)); RAFT_CUDA_TRY(hipMemsetAsync(S.data(), 0, S.size() * sizeof(T), stream)); RAFT_CUDA_TRY(hipMemsetAsync(V.data(), 0, V.size() * sizeof(T), stream)); auto A_view = raft::make_device_matrix_view<const T, int, raft::col_major>(A.data(), m, n); std::optional<raft::device_matrix_view<T, int, raft::col_major>> U_view = raft::make_device_matrix_view<T, int, raft::col_major>(U.data(), m, params.k); std::optional<raft::device_matrix_view<T, int, raft::col_major>> V_view = raft::make_device_matrix_view<T, int, raft::col_major>(V.data(), params.k, n); auto S_vec_view = raft::make_device_vector_view(S.data(), params.k); // RSVD tests if (params.k == 0) { // Test with PC and upsampling ratio if (params.use_bbt) { rsvd_perc_symmetric( handle, A_view, S_vec_view, params.PC_perc, params.UpS_perc, U_view, V_view); } else { rsvd_perc(handle, A_view, S_vec_view, params.PC_perc, params.UpS_perc, U_view, V_view); } } else { // Test with directly given fixed rank if (params.use_bbt) { rsvd_fixed_rank_symmetric_jacobi( handle, A_view, S_vec_view, params.p, eig_svd_tol, max_sweeps, U_view, V_view); } else { rsvd_fixed_rank_jacobi( handle, A_view, S_vec_view, params.p, eig_svd_tol, max_sweeps, U_view, V_view); } } raft::update_device(A.data(), A_backup_cpu.data(), m * n, stream); } protected: hipStream_t stream = 0; RsvdInputs<T> params; rmm::device_uvector<T> A, U, S, V, left_eig_vectors_ref, right_eig_vectors_ref, sing_vals_ref; }; const std::vector<RsvdInputs<float>> inputs_fx = { // Test with ratios {0.20f, 256, 256, 0.25f, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Square + BBT {0.20f, 2048, 256, 0.25f, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Tall + BBT {0.20f, 256, 256, 0.25f, 0.2f, 0.05f, 0, 0, false, 4321ULL}, 
// Square + non-BBT {0.20f, 2048, 256, 0.25f, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Tall + non-BBT {0.20f, 2048, 2048, 0.25f, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Square + BBT {0.60f, 16384, 2048, 0.25f, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Tall + BBT {0.20f, 2048, 2048, 0.25f, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Square + non-BBT {0.60f, 16384, 2048, 0.25f, 0.2f, 0.05f, 0, 0, false, 4321ULL} // Tall + non-BBT , // Test with fixed ranks {0.10f, 256, 256, 0.25f, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Square + BBT {0.12f, 2048, 256, 0.25f, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Tall + BBT {0.10f, 256, 256, 0.25f, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Square + non-BBT {0.12f, 2048, 256, 0.25f, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Tall + non-BBT {0.60f, 2048, 2048, 0.25f, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Square + BBT {1.00f, 16384, 2048, 0.25f, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Tall + BBT {0.60f, 2048, 2048, 0.25f, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Square + non-BBT {1.00f, 16384, 2048, 0.25f, 0.0f, 0.0f, 100, 5, false, 4321ULL} // Tall + non-BBT }; const std::vector<RsvdInputs<double>> inputs_dx = { // Test with ratios {0.20, 256, 256, 0.25f, 0.2, 0.05, 0, 0, true, 4321ULL}, // Square + BBT {0.20, 2048, 256, 0.25f, 0.2, 0.05, 0, 0, true, 4321ULL}, // Tall + BBT {0.20, 256, 256, 0.25f, 0.2, 0.05, 0, 0, false, 4321ULL}, // Square + non-BBT {0.20, 2048, 256, 0.25f, 0.2, 0.05, 0, 0, false, 4321ULL}, // Tall + non-BBT {0.20, 2048, 2048, 0.25f, 0.2, 0.05, 0, 0, true, 4321ULL}, // Square + BBT {0.60, 16384, 2048, 0.25f, 0.2, 0.05, 0, 0, true, 4321ULL}, // Tall + BBT {0.20, 2048, 2048, 0.25f, 0.2, 0.05, 0, 0, false, 4321ULL}, // Square + non-BBT {0.60, 16384, 2048, 0.25f, 0.2, 0.05, 0, 0, false, 4321ULL} // Tall + non-BBT , // Test with fixed ranks {0.10, 256, 256, 0.25f, 0.0, 0.0, 100, 5, true, 4321ULL}, // Square + BBT {0.12, 2048, 256, 0.25f, 0.0, 0.0, 100, 5, true, 4321ULL}, // Tall + BBT {0.10, 256, 256, 0.25f, 0.0, 0.0, 100, 5, false, 4321ULL}, // Square + non-BBT {0.12, 2048, 256, 0.25f, 0.0, 0.0, 100, 5, false, 4321ULL}, // Tall + non-BBT {0.60, 2048, 2048, 0.25f, 0.0, 0.0, 100, 5, true, 4321ULL}, // Square + BBT {1.00, 16384, 2048, 0.25f, 0.0, 0.0, 100, 5, true, 4321ULL}, // Tall + BBT {0.60, 2048, 2048, 0.25f, 0.0, 0.0, 100, 5, false, 4321ULL}, // Square + non-BBT {1.00, 16384, 2048, 0.25f, 0.0, 0.0, 100, 5, false, 4321ULL} // Tall + non-BBT }; const std::vector<RsvdInputs<float>> sanity_inputs_fx = { {100000000000000000.0f, 3, 2, 0.25f, 0.2f, 0.05f, 0, 0, true, 4321ULL}, {100000000000000000.0f, 3, 2, 0.25f, 0.0f, 0.0f, 1, 1, true, 4321ULL}, {100000000000000000.0f, 3, 2, 0.25f, 0.2f, 0.05f, 0, 0, false, 4321ULL}, {100000000000000000.0f, 3, 2, 0.25f, 0.0f, 0.0f, 1, 1, false, 4321ULL}}; const std::vector<RsvdInputs<double>> sanity_inputs_dx = { {100000000000000000.0, 3, 2, 0.25f, 0.2, 0.05, 0, 0, true, 4321ULL}, {100000000000000000.0, 3, 2, 0.25f, 0.0, 0.0, 1, 1, true, 4321ULL}, {100000000000000000.0, 3, 2, 0.25f, 0.2, 0.05, 0, 0, false, 4321ULL}, {100000000000000000.0, 3, 2, 0.25f, 0.0, 0.0, 1, 1, false, 4321ULL}}; typedef RsvdTest<float> RsvdSanityCheckValF; TEST_P(RsvdSanityCheckValF, Result) { ASSERT_TRUE(devArrMatch( sing_vals_ref.data(), S.data(), params.k, raft::CompareApproxAbs<float>(params.tolerance))); } typedef RsvdTest<double> RsvdSanityCheckValD; TEST_P(RsvdSanityCheckValD, Result) { ASSERT_TRUE(devArrMatch( sing_vals_ref.data(), S.data(), params.k, raft::CompareApproxAbs<double>(params.tolerance))); } typedef RsvdTest<float> RsvdSanityCheckLeftVecF; 
TEST_P(RsvdSanityCheckLeftVecF, Result) { ASSERT_TRUE(devArrMatch(left_eig_vectors_ref.data(), U.data(), params.n_row * params.k, raft::CompareApproxAbs<float>(params.tolerance))); } typedef RsvdTest<double> RsvdSanityCheckLeftVecD; TEST_P(RsvdSanityCheckLeftVecD, Result) { ASSERT_TRUE(devArrMatch(left_eig_vectors_ref.data(), U.data(), params.n_row * params.k, raft::CompareApproxAbs<double>(params.tolerance))); } typedef RsvdTest<float> RsvdSanityCheckRightVecF; TEST_P(RsvdSanityCheckRightVecF, Result) { ASSERT_TRUE(devArrMatch(right_eig_vectors_ref.data(), V.data(), params.n_col * params.k, raft::CompareApproxAbs<float>(params.tolerance))); } typedef RsvdTest<double> RsvdSanityCheckRightVecD; TEST_P(RsvdSanityCheckRightVecD, Result) { ASSERT_TRUE(devArrMatch(right_eig_vectors_ref.data(), V.data(), params.n_col * params.k, raft::CompareApproxAbs<double>(params.tolerance))); } typedef RsvdTest<float> RsvdTestSquareMatrixNormF; TEST_P(RsvdTestSquareMatrixNormF, Result) { raft::resources handle; ASSERT_TRUE(raft::linalg::evaluateSVDByL2Norm(handle, A.data(), U.data(), S.data(), V.data(), params.n_row, params.n_col, params.k, 4 * params.tolerance, resource::get_cuda_stream(handle))); } typedef RsvdTest<double> RsvdTestSquareMatrixNormD; TEST_P(RsvdTestSquareMatrixNormD, Result) { raft::resources handle; ASSERT_TRUE(raft::linalg::evaluateSVDByL2Norm(handle, A.data(), U.data(), S.data(), V.data(), params.n_row, params.n_col, params.k, 4 * params.tolerance, resource::get_cuda_stream(handle))); } INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckValF, ::testing::ValuesIn(sanity_inputs_fx)); INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckValD, ::testing::ValuesIn(sanity_inputs_dx)); INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckLeftVecF, ::testing::ValuesIn(sanity_inputs_fx)); INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckLeftVecD, ::testing::ValuesIn(sanity_inputs_dx)); INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckRightVecF, ::testing::ValuesIn(sanity_inputs_fx)); INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckRightVecD, ::testing::ValuesIn(sanity_inputs_dx)); INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdTestSquareMatrixNormF, ::testing::ValuesIn(inputs_fx)); INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdTestSquareMatrixNormD, ::testing::ValuesIn(inputs_dx)); } // end namespace linalg } // end namespace raft
e81bd0808ba08ffdb760245d98d606f1949d2483.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/linalg/rsvd.cuh> #include <raft/random/rng.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <algorithm> namespace raft { namespace linalg { template <typename T> struct RsvdInputs { T tolerance; int n_row; int n_col; float redundancy; T PC_perc; T UpS_perc; int k; int p; bool use_bbt; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const RsvdInputs<T>& dims) { return os; } template <typename T> class RsvdTest : public ::testing::TestWithParam<RsvdInputs<T>> { protected: RsvdTest() : A(0, stream), U(0, stream), S(0, stream), V(0, stream), left_eig_vectors_ref(0, stream), right_eig_vectors_ref(0, stream), sing_vals_ref(0, stream) { } void SetUp() override { raft::resources handle; stream = resource::get_cuda_stream(handle); params = ::testing::TestWithParam<RsvdInputs<T>>::GetParam(); // rSVD seems to be very sensitive to the random number sequence as well! raft::random::RngState r(params.seed, raft::random::GenPC); int m = params.n_row, n = params.n_col; T eig_svd_tol = 1.e-7; int max_sweeps = 100; T mu = 0.0, sigma = 1.0; A.resize(m * n, stream); if (params.tolerance > 1) { // Sanity check ASSERT(m == 3, "This test only supports mxn=3x2!"); ASSERT(m * n == 6, "This test only supports mxn=3x2!"); T data_h[] = {1.0, 4.0, 2.0, 2.0, 5.0, 1.0}; raft::update_device(A.data(), data_h, m * n, stream); T left_eig_vectors_ref_h[] = {-0.308219, -0.906133, -0.289695}; T right_eig_vectors_ref_h[] = {-0.638636, -0.769509}; T sing_vals_ref_h[] = {7.065283}; left_eig_vectors_ref.resize(m, stream); right_eig_vectors_ref.resize(n, stream); sing_vals_ref.resize(1, stream); raft::update_device(left_eig_vectors_ref.data(), left_eig_vectors_ref_h, m * 1, stream); raft::update_device(right_eig_vectors_ref.data(), right_eig_vectors_ref_h, n * 1, stream); raft::update_device(sing_vals_ref.data(), sing_vals_ref_h, 1, stream); } else { // Other normal tests int n_informative = int(0.25f * n); // Informative cols int len_informative = m * n_informative; int n_redundant = n - n_informative; // Redundant cols int len_redundant = m * n_redundant; normal(handle, r, A.data(), len_informative, mu, sigma); RAFT_CUDA_TRY(cudaMemcpyAsync(A.data() + len_informative, A.data(), len_redundant * sizeof(T), cudaMemcpyDeviceToDevice, stream)); } std::vector<T> A_backup_cpu(m * n); // Backup A matrix as svdJacobi will destroy the content of A raft::update_host(A_backup_cpu.data(), A.data(), m * n, stream); if (params.k == 0) { params.k = std::max((int)(std::min(m, n) * params.PC_perc), 1); params.p = std::max((int)(std::min(m, n) * params.UpS_perc), 1); } U.resize(m * params.k, stream); S.resize(params.k, stream); V.resize(n * params.k, stream); 
RAFT_CUDA_TRY(cudaMemsetAsync(U.data(), 0, U.size() * sizeof(T), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(S.data(), 0, S.size() * sizeof(T), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(V.data(), 0, V.size() * sizeof(T), stream)); auto A_view = raft::make_device_matrix_view<const T, int, raft::col_major>(A.data(), m, n); std::optional<raft::device_matrix_view<T, int, raft::col_major>> U_view = raft::make_device_matrix_view<T, int, raft::col_major>(U.data(), m, params.k); std::optional<raft::device_matrix_view<T, int, raft::col_major>> V_view = raft::make_device_matrix_view<T, int, raft::col_major>(V.data(), params.k, n); auto S_vec_view = raft::make_device_vector_view(S.data(), params.k); // RSVD tests if (params.k == 0) { // Test with PC and upsampling ratio if (params.use_bbt) { rsvd_perc_symmetric( handle, A_view, S_vec_view, params.PC_perc, params.UpS_perc, U_view, V_view); } else { rsvd_perc(handle, A_view, S_vec_view, params.PC_perc, params.UpS_perc, U_view, V_view); } } else { // Test with directly given fixed rank if (params.use_bbt) { rsvd_fixed_rank_symmetric_jacobi( handle, A_view, S_vec_view, params.p, eig_svd_tol, max_sweeps, U_view, V_view); } else { rsvd_fixed_rank_jacobi( handle, A_view, S_vec_view, params.p, eig_svd_tol, max_sweeps, U_view, V_view); } } raft::update_device(A.data(), A_backup_cpu.data(), m * n, stream); } protected: cudaStream_t stream = 0; RsvdInputs<T> params; rmm::device_uvector<T> A, U, S, V, left_eig_vectors_ref, right_eig_vectors_ref, sing_vals_ref; }; const std::vector<RsvdInputs<float>> inputs_fx = { // Test with ratios {0.20f, 256, 256, 0.25f, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Square + BBT {0.20f, 2048, 256, 0.25f, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Tall + BBT {0.20f, 256, 256, 0.25f, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Square + non-BBT {0.20f, 2048, 256, 0.25f, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Tall + non-BBT {0.20f, 2048, 2048, 0.25f, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Square + BBT {0.60f, 16384, 2048, 0.25f, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Tall + BBT {0.20f, 2048, 2048, 0.25f, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Square + non-BBT {0.60f, 16384, 2048, 0.25f, 0.2f, 0.05f, 0, 0, false, 4321ULL} // Tall + non-BBT , // Test with fixed ranks {0.10f, 256, 256, 0.25f, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Square + BBT {0.12f, 2048, 256, 0.25f, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Tall + BBT {0.10f, 256, 256, 0.25f, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Square + non-BBT {0.12f, 2048, 256, 0.25f, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Tall + non-BBT {0.60f, 2048, 2048, 0.25f, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Square + BBT {1.00f, 16384, 2048, 0.25f, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Tall + BBT {0.60f, 2048, 2048, 0.25f, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Square + non-BBT {1.00f, 16384, 2048, 0.25f, 0.0f, 0.0f, 100, 5, false, 4321ULL} // Tall + non-BBT }; const std::vector<RsvdInputs<double>> inputs_dx = { // Test with ratios {0.20, 256, 256, 0.25f, 0.2, 0.05, 0, 0, true, 4321ULL}, // Square + BBT {0.20, 2048, 256, 0.25f, 0.2, 0.05, 0, 0, true, 4321ULL}, // Tall + BBT {0.20, 256, 256, 0.25f, 0.2, 0.05, 0, 0, false, 4321ULL}, // Square + non-BBT {0.20, 2048, 256, 0.25f, 0.2, 0.05, 0, 0, false, 4321ULL}, // Tall + non-BBT {0.20, 2048, 2048, 0.25f, 0.2, 0.05, 0, 0, true, 4321ULL}, // Square + BBT {0.60, 16384, 2048, 0.25f, 0.2, 0.05, 0, 0, true, 4321ULL}, // Tall + BBT {0.20, 2048, 2048, 0.25f, 0.2, 0.05, 0, 0, false, 4321ULL}, // Square + non-BBT {0.60, 16384, 2048, 0.25f, 0.2, 0.05, 0, 0, false, 4321ULL} // Tall + 
non-BBT , // Test with fixed ranks {0.10, 256, 256, 0.25f, 0.0, 0.0, 100, 5, true, 4321ULL}, // Square + BBT {0.12, 2048, 256, 0.25f, 0.0, 0.0, 100, 5, true, 4321ULL}, // Tall + BBT {0.10, 256, 256, 0.25f, 0.0, 0.0, 100, 5, false, 4321ULL}, // Square + non-BBT {0.12, 2048, 256, 0.25f, 0.0, 0.0, 100, 5, false, 4321ULL}, // Tall + non-BBT {0.60, 2048, 2048, 0.25f, 0.0, 0.0, 100, 5, true, 4321ULL}, // Square + BBT {1.00, 16384, 2048, 0.25f, 0.0, 0.0, 100, 5, true, 4321ULL}, // Tall + BBT {0.60, 2048, 2048, 0.25f, 0.0, 0.0, 100, 5, false, 4321ULL}, // Square + non-BBT {1.00, 16384, 2048, 0.25f, 0.0, 0.0, 100, 5, false, 4321ULL} // Tall + non-BBT }; const std::vector<RsvdInputs<float>> sanity_inputs_fx = { {100000000000000000.0f, 3, 2, 0.25f, 0.2f, 0.05f, 0, 0, true, 4321ULL}, {100000000000000000.0f, 3, 2, 0.25f, 0.0f, 0.0f, 1, 1, true, 4321ULL}, {100000000000000000.0f, 3, 2, 0.25f, 0.2f, 0.05f, 0, 0, false, 4321ULL}, {100000000000000000.0f, 3, 2, 0.25f, 0.0f, 0.0f, 1, 1, false, 4321ULL}}; const std::vector<RsvdInputs<double>> sanity_inputs_dx = { {100000000000000000.0, 3, 2, 0.25f, 0.2, 0.05, 0, 0, true, 4321ULL}, {100000000000000000.0, 3, 2, 0.25f, 0.0, 0.0, 1, 1, true, 4321ULL}, {100000000000000000.0, 3, 2, 0.25f, 0.2, 0.05, 0, 0, false, 4321ULL}, {100000000000000000.0, 3, 2, 0.25f, 0.0, 0.0, 1, 1, false, 4321ULL}}; typedef RsvdTest<float> RsvdSanityCheckValF; TEST_P(RsvdSanityCheckValF, Result) { ASSERT_TRUE(devArrMatch( sing_vals_ref.data(), S.data(), params.k, raft::CompareApproxAbs<float>(params.tolerance))); } typedef RsvdTest<double> RsvdSanityCheckValD; TEST_P(RsvdSanityCheckValD, Result) { ASSERT_TRUE(devArrMatch( sing_vals_ref.data(), S.data(), params.k, raft::CompareApproxAbs<double>(params.tolerance))); } typedef RsvdTest<float> RsvdSanityCheckLeftVecF; TEST_P(RsvdSanityCheckLeftVecF, Result) { ASSERT_TRUE(devArrMatch(left_eig_vectors_ref.data(), U.data(), params.n_row * params.k, raft::CompareApproxAbs<float>(params.tolerance))); } typedef RsvdTest<double> RsvdSanityCheckLeftVecD; TEST_P(RsvdSanityCheckLeftVecD, Result) { ASSERT_TRUE(devArrMatch(left_eig_vectors_ref.data(), U.data(), params.n_row * params.k, raft::CompareApproxAbs<double>(params.tolerance))); } typedef RsvdTest<float> RsvdSanityCheckRightVecF; TEST_P(RsvdSanityCheckRightVecF, Result) { ASSERT_TRUE(devArrMatch(right_eig_vectors_ref.data(), V.data(), params.n_col * params.k, raft::CompareApproxAbs<float>(params.tolerance))); } typedef RsvdTest<double> RsvdSanityCheckRightVecD; TEST_P(RsvdSanityCheckRightVecD, Result) { ASSERT_TRUE(devArrMatch(right_eig_vectors_ref.data(), V.data(), params.n_col * params.k, raft::CompareApproxAbs<double>(params.tolerance))); } typedef RsvdTest<float> RsvdTestSquareMatrixNormF; TEST_P(RsvdTestSquareMatrixNormF, Result) { raft::resources handle; ASSERT_TRUE(raft::linalg::evaluateSVDByL2Norm(handle, A.data(), U.data(), S.data(), V.data(), params.n_row, params.n_col, params.k, 4 * params.tolerance, resource::get_cuda_stream(handle))); } typedef RsvdTest<double> RsvdTestSquareMatrixNormD; TEST_P(RsvdTestSquareMatrixNormD, Result) { raft::resources handle; ASSERT_TRUE(raft::linalg::evaluateSVDByL2Norm(handle, A.data(), U.data(), S.data(), V.data(), params.n_row, params.n_col, params.k, 4 * params.tolerance, resource::get_cuda_stream(handle))); } INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckValF, ::testing::ValuesIn(sanity_inputs_fx)); INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckValD, ::testing::ValuesIn(sanity_inputs_dx)); INSTANTIATE_TEST_CASE_P(RsvdTests, 
RsvdSanityCheckLeftVecF, ::testing::ValuesIn(sanity_inputs_fx)); INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckLeftVecD, ::testing::ValuesIn(sanity_inputs_dx)); INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckRightVecF, ::testing::ValuesIn(sanity_inputs_fx)); INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckRightVecD, ::testing::ValuesIn(sanity_inputs_dx)); INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdTestSquareMatrixNormF, ::testing::ValuesIn(inputs_fx)); INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdTestSquareMatrixNormD, ::testing::ValuesIn(inputs_dx)); } // end namespace linalg } // end namespace raft
bd1f5838472dee74cd0473a118a93edff4e07bf8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
EXAMPLE SOURCE :
https://forums.developer.nvidia.com/t/reading-globaltimer-register-or-calling-clock-clock64-in-loop-prevent-concurrent-kernel-execution/48600/8
https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#special-registers-clock64
https://docs.nvidia.com/cuda/inline-ptx-assembly/index.html

generating Asm PTX code
https://developer.nvidia.com/blog/cuda-pro-tip-view-assembly-code-correlation-nsight-visual-studio-edition/
https://stackoverflow.com/questions/20482686/how-to-get-the-assembly-code-of-a-cuda-kernel

$ nvcc -ptx -o kernel.ptx kernel.cu

.func (.param .b64 func_retval0) clock64( )
{
    .reg .b64 %rd<3>;
    // inline asm
    mov.u64 %rd1, %clock64;
    // inline asm
    mov.b64 %rd2, %rd1;
    st.param.b64 [func_retval0+0], %rd2;
    ret;
}

COMPILATION:
/usr/local/cuda-10.2/bin/nvcc -ccbin g++ -I../common/inc -m64 -g -G -gencode arch=compute_30,code=sm_30 -gencode arch=compute_30,code=compute_30 -o clock2 clock2.cu
/usr/local/cuda-10.2/bin/nvcc -ccbin g++ -I../common/inc -m64 -g -G -gencode arch=compute_72,code=sm_72 -gencode arch=compute_72,code=compute_72 -o clock2 clock2.cu

THIS EXAMPLE WORKS, WITH SOME LITTLE EXTRA TIME
*/
#include <stdio.h>

#define DELAY_VAL 5000000000ULL // about 5 secs

__global__ void child(){
    unsigned long long start = clock64();
    for(long long int i=0;i<10000000;i++);
    //while (clock64()< start+DELAY_VAL);
}

int main(int argc, char* argv[]) {
    hipStream_t st1;
    hipStreamCreate(&st1);

    clock_t ck_start = clock();
    hipLaunchKernelGGL(( child), dim3(1),dim3(1),0,st1, );
    hipDeviceSynchronize();
    printf ("Kernel: %ld clicks.\n", clock()-ck_start);

    return 0;
}
bd1f5838472dee74cd0473a118a93edff4e07bf8.cu
/*
EXAMPLE SOURCE :
https://forums.developer.nvidia.com/t/reading-globaltimer-register-or-calling-clock-clock64-in-loop-prevent-concurrent-kernel-execution/48600/8
https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#special-registers-clock64
https://docs.nvidia.com/cuda/inline-ptx-assembly/index.html

generating Asm PTX code
https://developer.nvidia.com/blog/cuda-pro-tip-view-assembly-code-correlation-nsight-visual-studio-edition/
https://stackoverflow.com/questions/20482686/how-to-get-the-assembly-code-of-a-cuda-kernel

$ nvcc -ptx -o kernel.ptx kernel.cu

.func (.param .b64 func_retval0) clock64( )
{
    .reg .b64 %rd<3>;
    // inline asm
    mov.u64 %rd1, %clock64;
    // inline asm
    mov.b64 %rd2, %rd1;
    st.param.b64 [func_retval0+0], %rd2;
    ret;
}

COMPILATION:
/usr/local/cuda-10.2/bin/nvcc -ccbin g++ -I../common/inc -m64 -g -G -gencode arch=compute_30,code=sm_30 -gencode arch=compute_30,code=compute_30 -o clock2 clock2.cu
/usr/local/cuda-10.2/bin/nvcc -ccbin g++ -I../common/inc -m64 -g -G -gencode arch=compute_72,code=sm_72 -gencode arch=compute_72,code=compute_72 -o clock2 clock2.cu

THIS EXAMPLE WORKS, WITH SOME LITTLE EXTRA TIME
*/
#include <stdio.h>

#define DELAY_VAL 5000000000ULL // about 5 secs

__global__ void child(){
    unsigned long long start = clock64();
    for(long long int i=0;i<10000000;i++);
    //while (clock64()< start+DELAY_VAL);
}

int main(int argc, char* argv[]) {
    cudaStream_t st1;
    cudaStreamCreate(&st1);

    clock_t ck_start = clock();
    child<<<1,1,0,st1>>>();
    cudaDeviceSynchronize();
    printf ("Kernel: %ld clicks.\n", clock()-ck_start);

    return 0;
}
9d1ca45b9e5fb3e8113f28518b2058c8db71f3c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // REQUIRES: x86-registered-target, amdgpu-registered-target // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \ // RUN: -emit-llvm -o - -x hip %s | FileCheck \ // RUN: -check-prefixes=COMMON,DEV,NORDC-D %s // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \ // RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.dev // RUN: cat %t.dev | FileCheck -check-prefixes=COMMON,DEV,RDC-D %s // RUN: %clang_cc1 -triple x86_64-gnu-linux -std=c++11 \ // RUN: -emit-llvm -o - -x hip %s | FileCheck \ // RUN: -check-prefixes=COMMON,HOST,NORDC %s // RUN: %clang_cc1 -triple x86_64-gnu-linux -std=c++11 \ // RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.host // RUN: cat %t.host | FileCheck -check-prefixes=COMMON,HOST,RDC %s // Check device and host compilation use the same postfix for static // variable name. // RUN: cat %t.dev %t.host | FileCheck -check-prefix=POSTFIX %s #include "Inputs/cuda.h" struct vec { float x,y,z; }; // DEV-DAG: @x.managed = addrspace(1) externally_initialized global i32 1, align 4 // DEV-DAG: @x = addrspace(1) externally_initialized global i32 addrspace(1)* null // NORDC-DAG: @x.managed = internal global i32 1 // RDC-DAG: @x.managed = global i32 1 // NORDC-DAG: @x = internal externally_initialized global i32* null // RDC-DAG: @x = externally_initialized global i32* null // HOST-DAG: @[[DEVNAMEX:[0-9]+]] = {{.*}}c"x\00" __managed__ int x = 1; // DEV-DAG: @v.managed = addrspace(1) externally_initialized global [100 x %struct.vec] zeroinitializer, align 4 // DEV-DAG: @v = addrspace(1) externally_initialized global [100 x %struct.vec] addrspace(1)* null __managed__ vec v[100]; // DEV-DAG: @v2.managed = addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> <{ %struct.vec { float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 }, [99 x %struct.vec] zeroinitializer }>, align 4 // DEV-DAG: @v2 = addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> addrspace(1)* null __managed__ vec v2[100] = {{1, 1, 1}}; // DEV-DAG: @ex.managed = external addrspace(1) global i32, align 4 // DEV-DAG: @ex = external addrspace(1) externally_initialized global i32 addrspace(1)* // HOST-DAG: @ex.managed = external global i32 // HOST-DAG: @ex = external externally_initialized global i32* extern __managed__ int ex; // NORDC-D-DAG: @_ZL2sx.managed = addrspace(1) externally_initialized global i32 1, align 4 // NORDC-D-DAG: @_ZL2sx = addrspace(1) externally_initialized global i32 addrspace(1)* null // RDC-D-DAG: @_ZL2sx__static__[[HASH:.*]].managed = addrspace(1) externally_initialized global i32 1, align 4 // RDC-D-DAG: @_ZL2sx__static__[[HASH]] = addrspace(1) externally_initialized global i32 addrspace(1)* null // HOST-DAG: @_ZL2sx.managed = internal global i32 1 // HOST-DAG: @_ZL2sx = internal externally_initialized global i32* null // NORDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx\00" // RDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx__static__[[HASH:.*]]\00" // POSTFIX: @_ZL2sx__static__[[HASH:.*]] = addrspace(1) externally_initialized global i32 addrspace(1)* null // POSTFIX: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx__static__[[HASH]]\00" static __managed__ int sx = 1; // DEV-DAG: @llvm.compiler.used // DEV-SAME-DAG: @x.managed // DEV-SAME-DAG: @x // DEV-SAME-DAG: @v.managed // DEV-SAME-DAG: @v // DEV-SAME-DAG: @_ZL2sx.managed // DEV-SAME-DAG: @_ZL2sx // Force ex and sx mitted in device compilation. 
__global__ void foo(int *z) { *z = x + ex + sx; v[1].x = 2; } // Force ex and sx emitted in host compilatioin. int foo2() { return ex + sx; } // COMMON-LABEL: define {{.*}}@_Z4loadv() // DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4 // DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32* // DEV: %1 = load i32, i32* %0, align 4 // DEV: ret i32 %1 // HOST: %ld.managed = load i32*, i32** @x, align 4 // HOST: %0 = load i32, i32* %ld.managed, align 4 // HOST: ret i32 %0 __device__ __host__ int load() { return x; } // COMMON-LABEL: define {{.*}}@_Z5storev() // DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4 // DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32* // DEV: store i32 2, i32* %0, align 4 // HOST: %ld.managed = load i32*, i32** @x, align 4 // HOST: store i32 2, i32* %ld.managed, align 4 __device__ __host__ void store() { x = 2; } // COMMON-LABEL: define {{.*}}@_Z10addr_takenv() // DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32* // DEV: store i32* %0, i32** %p.ascast, align 8 // DEV: %1 = load i32*, i32** %p.ascast, align 8 // DEV: store i32 3, i32* %1, align 4 // HOST: %ld.managed = load i32*, i32** @x, align 4 // HOST: store i32* %ld.managed, i32** %p, align 8 // HOST: %0 = load i32*, i32** %p, align 8 // HOST: store i32 3, i32* %0, align 4 __device__ __host__ void addr_taken() { int *p = &x; *p = 3; } // HOST-LABEL: define {{.*}}@_Z5load2v() // HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16 // HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0 // HOST: %1 = load float, float* %0, align 4 // HOST: ret float %1 __device__ __host__ float load2() { return v[1].x; } // HOST-LABEL: define {{.*}}@_Z5load3v() // HOST: %ld.managed = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16 // HOST: %0 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed to [100 x %struct.vec]* // HOST: %1 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %0, i64 0, i64 1, i32 1 // HOST: %2 = load float, float* %1, align 4 // HOST: ret float %2 float load3() { return v2[1].y; } // HOST-LABEL: define {{.*}}@_Z11addr_taken2v() // HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16 // HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0 // HOST: %1 = ptrtoint float* %0 to i64 // HOST: %ld.managed1 = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16 // HOST: %2 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed1 to [100 x %struct.vec]* // HOST: %3 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %2, i64 0, i64 1, i32 1 // HOST: %4 = ptrtoint float* %3 to i64 // HOST: %5 = sub i64 %4, %1 // HOST: %6 = sdiv exact i64 %5, 4 // HOST: %7 = sitofp i64 %6 to float // HOST: ret float %7 float addr_taken2() { return (float)reinterpret_cast<long>(&(v2[1].y)-&(v[1].x)); } // COMMON-LABEL: define {{.*}}@_Z5load4v() // DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @ex, align 4 // DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32* // DEV: %1 = load i32, i32* %0, align 4 // DEV: ret i32 %1 // HOST: %ld.managed = load i32*, i32** @ex, align 4 // HOST: %0 = load i32, i32* %ld.managed, align 4 // HOST: ret i32 %0 __device__ __host__ int load4() { return ex; } // 
HOST-DAG: __hipRegisterManagedVar({{.*}}@x {{.*}}@x.managed {{.*}}@[[DEVNAMEX]]{{.*}}, i64 4, i32 4) // HOST-DAG: __hipRegisterManagedVar({{.*}}@_ZL2sx {{.*}}@_ZL2sx.managed {{.*}}@[[DEVNAMESX]] // HOST-NOT: __hipRegisterManagedVar({{.*}}@ex {{.*}}@ex.managed // HOST-DAG: declare void @__hipRegisterManagedVar(i8**, i8*, i8*, i8*, i64, i32)
9d1ca45b9e5fb3e8113f28518b2058c8db71f3c4.cu
// REQUIRES: x86-registered-target, amdgpu-registered-target // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \ // RUN: -emit-llvm -o - -x hip %s | FileCheck \ // RUN: -check-prefixes=COMMON,DEV,NORDC-D %s // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \ // RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.dev // RUN: cat %t.dev | FileCheck -check-prefixes=COMMON,DEV,RDC-D %s // RUN: %clang_cc1 -triple x86_64-gnu-linux -std=c++11 \ // RUN: -emit-llvm -o - -x hip %s | FileCheck \ // RUN: -check-prefixes=COMMON,HOST,NORDC %s // RUN: %clang_cc1 -triple x86_64-gnu-linux -std=c++11 \ // RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.host // RUN: cat %t.host | FileCheck -check-prefixes=COMMON,HOST,RDC %s // Check device and host compilation use the same postfix for static // variable name. // RUN: cat %t.dev %t.host | FileCheck -check-prefix=POSTFIX %s #include "Inputs/cuda.h" struct vec { float x,y,z; }; // DEV-DAG: @x.managed = addrspace(1) externally_initialized global i32 1, align 4 // DEV-DAG: @x = addrspace(1) externally_initialized global i32 addrspace(1)* null // NORDC-DAG: @x.managed = internal global i32 1 // RDC-DAG: @x.managed = global i32 1 // NORDC-DAG: @x = internal externally_initialized global i32* null // RDC-DAG: @x = externally_initialized global i32* null // HOST-DAG: @[[DEVNAMEX:[0-9]+]] = {{.*}}c"x\00" __managed__ int x = 1; // DEV-DAG: @v.managed = addrspace(1) externally_initialized global [100 x %struct.vec] zeroinitializer, align 4 // DEV-DAG: @v = addrspace(1) externally_initialized global [100 x %struct.vec] addrspace(1)* null __managed__ vec v[100]; // DEV-DAG: @v2.managed = addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> <{ %struct.vec { float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 }, [99 x %struct.vec] zeroinitializer }>, align 4 // DEV-DAG: @v2 = addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> addrspace(1)* null __managed__ vec v2[100] = {{1, 1, 1}}; // DEV-DAG: @ex.managed = external addrspace(1) global i32, align 4 // DEV-DAG: @ex = external addrspace(1) externally_initialized global i32 addrspace(1)* // HOST-DAG: @ex.managed = external global i32 // HOST-DAG: @ex = external externally_initialized global i32* extern __managed__ int ex; // NORDC-D-DAG: @_ZL2sx.managed = addrspace(1) externally_initialized global i32 1, align 4 // NORDC-D-DAG: @_ZL2sx = addrspace(1) externally_initialized global i32 addrspace(1)* null // RDC-D-DAG: @_ZL2sx__static__[[HASH:.*]].managed = addrspace(1) externally_initialized global i32 1, align 4 // RDC-D-DAG: @_ZL2sx__static__[[HASH]] = addrspace(1) externally_initialized global i32 addrspace(1)* null // HOST-DAG: @_ZL2sx.managed = internal global i32 1 // HOST-DAG: @_ZL2sx = internal externally_initialized global i32* null // NORDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx\00" // RDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx__static__[[HASH:.*]]\00" // POSTFIX: @_ZL2sx__static__[[HASH:.*]] = addrspace(1) externally_initialized global i32 addrspace(1)* null // POSTFIX: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx__static__[[HASH]]\00" static __managed__ int sx = 1; // DEV-DAG: @llvm.compiler.used // DEV-SAME-DAG: @x.managed // DEV-SAME-DAG: @x // DEV-SAME-DAG: @v.managed // DEV-SAME-DAG: @v // DEV-SAME-DAG: @_ZL2sx.managed // DEV-SAME-DAG: @_ZL2sx // Force ex and sx mitted in device compilation. 
__global__ void foo(int *z) { *z = x + ex + sx; v[1].x = 2; } // Force ex and sx emitted in host compilatioin. int foo2() { return ex + sx; } // COMMON-LABEL: define {{.*}}@_Z4loadv() // DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4 // DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32* // DEV: %1 = load i32, i32* %0, align 4 // DEV: ret i32 %1 // HOST: %ld.managed = load i32*, i32** @x, align 4 // HOST: %0 = load i32, i32* %ld.managed, align 4 // HOST: ret i32 %0 __device__ __host__ int load() { return x; } // COMMON-LABEL: define {{.*}}@_Z5storev() // DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4 // DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32* // DEV: store i32 2, i32* %0, align 4 // HOST: %ld.managed = load i32*, i32** @x, align 4 // HOST: store i32 2, i32* %ld.managed, align 4 __device__ __host__ void store() { x = 2; } // COMMON-LABEL: define {{.*}}@_Z10addr_takenv() // DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32* // DEV: store i32* %0, i32** %p.ascast, align 8 // DEV: %1 = load i32*, i32** %p.ascast, align 8 // DEV: store i32 3, i32* %1, align 4 // HOST: %ld.managed = load i32*, i32** @x, align 4 // HOST: store i32* %ld.managed, i32** %p, align 8 // HOST: %0 = load i32*, i32** %p, align 8 // HOST: store i32 3, i32* %0, align 4 __device__ __host__ void addr_taken() { int *p = &x; *p = 3; } // HOST-LABEL: define {{.*}}@_Z5load2v() // HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16 // HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0 // HOST: %1 = load float, float* %0, align 4 // HOST: ret float %1 __device__ __host__ float load2() { return v[1].x; } // HOST-LABEL: define {{.*}}@_Z5load3v() // HOST: %ld.managed = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16 // HOST: %0 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed to [100 x %struct.vec]* // HOST: %1 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %0, i64 0, i64 1, i32 1 // HOST: %2 = load float, float* %1, align 4 // HOST: ret float %2 float load3() { return v2[1].y; } // HOST-LABEL: define {{.*}}@_Z11addr_taken2v() // HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16 // HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0 // HOST: %1 = ptrtoint float* %0 to i64 // HOST: %ld.managed1 = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16 // HOST: %2 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed1 to [100 x %struct.vec]* // HOST: %3 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %2, i64 0, i64 1, i32 1 // HOST: %4 = ptrtoint float* %3 to i64 // HOST: %5 = sub i64 %4, %1 // HOST: %6 = sdiv exact i64 %5, 4 // HOST: %7 = sitofp i64 %6 to float // HOST: ret float %7 float addr_taken2() { return (float)reinterpret_cast<long>(&(v2[1].y)-&(v[1].x)); } // COMMON-LABEL: define {{.*}}@_Z5load4v() // DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @ex, align 4 // DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32* // DEV: %1 = load i32, i32* %0, align 4 // DEV: ret i32 %1 // HOST: %ld.managed = load i32*, i32** @ex, align 4 // HOST: %0 = load i32, i32* %ld.managed, align 4 // HOST: ret i32 %0 __device__ __host__ int load4() { return ex; } // 
HOST-DAG: __hipRegisterManagedVar({{.*}}@x {{.*}}@x.managed {{.*}}@[[DEVNAMEX]]{{.*}}, i64 4, i32 4) // HOST-DAG: __hipRegisterManagedVar({{.*}}@_ZL2sx {{.*}}@_ZL2sx.managed {{.*}}@[[DEVNAMESX]] // HOST-NOT: __hipRegisterManagedVar({{.*}}@ex {{.*}}@ex.managed // HOST-DAG: declare void @__hipRegisterManagedVar(i8**, i8*, i8*, i8*, i64, i32)
718c3f06e7dd03177d6dd30e8f9ed7bda307b47f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime_api.h> #include <time.h> /**************************************************************************** nvcc -o cudaaz99 az99.cu To Run: ./cudaaz99 > cudares.txt *****************************************************************************/ __device__ int match(char *attempt) { char pw1[] = "CV78"; char pw2[] = "FT81"; char pw3[] = "HS21"; char pw4[] = "SC63"; char *a = attempt; char *b = attempt; char *c = attempt; char *d = attempt; char *p1 = pw1; char *p2 = pw2; char *p3 = pw3; char *p4 = pw4; while(*a == *p1) { if(*a == '\0') { printf("Password: %s\n",pw1); break; } a++; p1++; } while(*b == *p2) { if(*b == '\0') { printf("Password: %s\n",pw2); break; } b++; p2++; } while(*c == *p3) { if(*c == '\0') { printf("Password: %s\n",pw3); break; } c++; p3++; } while(*d == *p4) { if(*d == '\0') { printf("Password: %s\n",pw4); break; } d++; p4++; } return 0; } /**************************************************************************** The kernel function assume that there will be only one thread and uses nested loops to generate all possible passwords and test whether they match the hidden password. *****************************************************************************/ __global__ void kernel() { char k1,k2; char pass[5]; pass[4] = '\0'; int i = blockIdx.x+65; int j = threadIdx.x+65; char matchone = i; char matchtwo = j; pass[0] = matchone; pass[1] = matchtwo; for(k1='0'; k1<='9'; k1++){ for(k2='0'; k2<='9'; k2++){ pass[2] = k1; pass[3] = k2; if(match(pass)) { //printf("Password Cracked"); } else { //printf("tried: %s\n", pass); } } } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); hipLaunchKernelGGL(( kernel) , dim3(26),dim3(26), 0, 0, ); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
718c3f06e7dd03177d6dd30e8f9ed7bda307b47f.cu
#include <stdio.h> #include <cuda_runtime_api.h> #include <time.h> /**************************************************************************** nvcc -o cudaaz99 az99.cu To Run: ./cudaaz99 > cudares.txt *****************************************************************************/ __device__ int match(char *attempt) { char pw1[] = "CV78"; char pw2[] = "FT81"; char pw3[] = "HS21"; char pw4[] = "SC63"; char *a = attempt; char *b = attempt; char *c = attempt; char *d = attempt; char *p1 = pw1; char *p2 = pw2; char *p3 = pw3; char *p4 = pw4; while(*a == *p1) { if(*a == '\0') { printf("Password: %s\n",pw1); break; } a++; p1++; } while(*b == *p2) { if(*b == '\0') { printf("Password: %s\n",pw2); break; } b++; p2++; } while(*c == *p3) { if(*c == '\0') { printf("Password: %s\n",pw3); break; } c++; p3++; } while(*d == *p4) { if(*d == '\0') { printf("Password: %s\n",pw4); break; } d++; p4++; } return 0; } /**************************************************************************** The kernel function assume that there will be only one thread and uses nested loops to generate all possible passwords and test whether they match the hidden password. *****************************************************************************/ __global__ void kernel() { char k1,k2; char pass[5]; pass[4] = '\0'; int i = blockIdx.x+65; int j = threadIdx.x+65; char matchone = i; char matchtwo = j; pass[0] = matchone; pass[1] = matchtwo; for(k1='0'; k1<='9'; k1++){ for(k2='0'; k2<='9'; k2++){ pass[2] = k1; pass[3] = k2; if(match(pass)) { //printf("Password Cracked"); } else { //printf("tried: %s\n", pass); } } } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); kernel <<<26,26>>>(); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
4543e2afea6d6870776999feed08bda971ccfef6.hip
// !!! This is a file automatically generated by hipify!!!
#include "../../common/book.h"

int main(void) {
    hipDeviceProp_t prop;
    int count;
    HANDLE_ERROR(hipGetDeviceCount(&count));
    for (int i = 0; i < count; i++) {
        HANDLE_ERROR(hipGetDeviceProperties(&prop, i)); // This returns a structure of type hipDeviceProp_t
        printf("--- General Information for device %d ---\n", i+1);
        printf("Name: %s\n", prop.name);
        printf("Compute capability: %d.%d\n", prop.major, prop.minor);
        printf("Clock rate: %d\n", prop.clockRate);
        printf("Device copy overlap: ");
        if (prop.deviceOverlap) printf("Enabled\n");
        else printf("Disabled\n");
        printf("Kernel execution timeout: ");
        if (prop.kernelExecTimeoutEnabled) printf("Enabled\n");
        else printf("Disabled\n");
        printf("\n---Memory Information for device %d ---\n", i+1);
        printf("Total global mem: %zu\n", prop.totalGlobalMem);
        printf("Total constant mem: %zu\n", prop.totalConstMem);
        printf("Max mem pitch: %zu\n", prop.memPitch);
        printf("Texture Alignment: %zu\n", prop.textureAlignment);
        printf("\n--- MP Information for device %d ---\n", i+1);
        printf("Multiprocessor count: %d\n", prop.multiProcessorCount);
        printf("Shared mem per mp: %zu\n", prop.sharedMemPerBlock);
        printf("Registers per mp: %d\n", prop.regsPerBlock);
        printf("Threads in warp: %d\n", prop.warpSize);
        printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
        printf("Max thread dimension: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("\n");
    }
}
4543e2afea6d6870776999feed08bda971ccfef6.cu
#include "../../common/book.h" int main(void) { cudaDeviceProp prop; int count; HANDLE_ERROR(cudaGetDeviceCount(&count)); for (int i = 0; i < count; i++) { HANDLE_ERROR(cudaGetDeviceProperties(&prop, i)); // This returns a structure of type cudaDeviceProp printf("--- General Information for device %d ---\n", i+1); printf("Name: %s\n", prop.name); printf("Compute capability: %d.%d\n", prop.major, prop.minor); printf("Clock rate: %d\n", prop.clockRate); printf("Device copy overlap: "); if (prop.deviceOverlap) printf("Enabled\n"); else printf("Disabled\n"); printf("Kernel execution timeout: "); if (prop.kernelExecTimeoutEnabled) printf("Enabled\n"); else printf("Disabled\n"); printf("\n---Memory Information for device %d ---\n", i+1); printf("Total global mem: %zu\n", prop.totalGlobalMem); printf("Total constant mem: %zu\n", prop.totalConstMem); printf("Max mem pitch: %zu\n", prop.memPitch); printf("Texture Alignment: %zu\n", prop.textureAlignment); printf("\n--- MP Information for device %d ---\n", i+1); printf("Multiprocessor count: %d\n", prop.multiProcessorCount); printf("Shared mem per mp: %zu\n", prop.sharedMemPerBlock); printf("Registers per mp: %d\n", prop.regsPerBlock); printf("Threads in warp: %d\n", prop.warpSize); printf("Max threads per block: %d\n", prop.maxThreadsPerBlock); printf("Max thread dimension: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf("Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); printf("\n"); } }
40e1ba913090fbe0db3d04caf7fe585166ccc878.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GatherPlugin.hpp"
namespace MNN {

template <typename T>
__global__ void GATHER(const int count, const int outputOutsideStride, const int inputOutsideStride,
                       const int N, const int limit, int insideStride,
                       const T *inputPtr, const T* indicesPtr, T *outputPtr) {
    CUDA_KERNEL_LOOP(index, count) {
        int o   = index / (N*insideStride);
        int o_r = index % (N*insideStride);
        int i = o_r / insideStride;
        int s = o_r % insideStride;
        int outputIdx = outputOutsideStride * o + i * insideStride + s;
        int indices = int(indicesPtr[i]);
        if (indices < 0 || indices > limit) {
            outputPtr[outputIdx] = 0.0f;
        } else {
            int inputIdx = inputOutsideStride * o + insideStride * indices + s;
            outputPtr[outputIdx] = inputPtr[inputIdx];
        }
    }
}

hipError_t GatherPlugin::GatherExecute(nvinfer1::DataType dataType, const int count, const float* bottom_data,
                                       const float* indices, float* top_data, hipStream_t stream) {
    if (dataType == nvinfer1::DataType::kFLOAT) {
        hipLaunchKernelGGL(( GATHER<float>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count,
            mOutputOutsideStride, mInputOutsideStride, mN, mLimit, mInsideStride, bottom_data, indices, top_data);
    } else {
        hipLaunchKernelGGL(( GATHER<__half>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count,
            mOutputOutsideStride, mInputOutsideStride, mN, mLimit, mInsideStride,
            (const __half*)bottom_data, (const __half*)indices, (__half*)top_data);
    }
    return hipPeekAtLastError();
}

}; // namespace MNN
40e1ba913090fbe0db3d04caf7fe585166ccc878.cu
#include "GatherPlugin.hpp" namespace MNN { template <typename T> __global__ void GATHER(const int count, const int outputOutsideStride, const int inputOutsideStride, const int N, const int limit, int insideStride, const T *inputPtr, const T* indicesPtr, T *outputPtr) { CUDA_KERNEL_LOOP(index, count) { int o = index / (N*insideStride); int o_r = index % (N*insideStride); int i = o_r / insideStride; int s = o_r % insideStride; int outputIdx = outputOutsideStride * o + i * insideStride + s; int indices = int(indicesPtr[i]); if (indices < 0 || indices > limit) { outputPtr[outputIdx] = 0.0f; }else{ int inputIdx = inputOutsideStride * o + insideStride * indices + s; outputPtr[outputIdx] = inputPtr[inputIdx]; } } } cudaError_t GatherPlugin::GatherExecute(nvinfer1::DataType dataType, const int count, const float* bottom_data, const float* indices, float* top_data, cudaStream_t stream) { if (dataType == nvinfer1::DataType::kFLOAT){ GATHER<float><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, mOutputOutsideStride, mInputOutsideStride, mN, mLimit, mInsideStride, bottom_data, indices, top_data); }else{ GATHER<__half><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, mOutputOutsideStride, mInputOutsideStride, mN, mLimit, mInsideStride, (const __half*)bottom_data, (const __half*)indices, (__half*)top_data); } return cudaPeekAtLastError(); } }; // namespace MNN
c5ef7ebf6bbc9a5eb1dc7d7cb2dc624714d0af53.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */
#include <stdio.h>
#include <stdlib.h>

static const int WORK_SIZE = 256;

/**
 * This macro checks return value of the CUDA runtime call and exits
 * the application if the call failed.
 */
#define CUDA_CHECK_RETURN(value) { \
    hipError_t _m_cudaStat = value; \
    if (_m_cudaStat != hipSuccess) { \
        fprintf(stderr, "Error %s at line %d in file %s\n", \
                hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
        exit(1); \
    } }

__host__ __device__ unsigned int bitreverse(unsigned int number) {
    number = ((0xf0f0f0f0 & number) >> 4) | ((0x0f0f0f0f & number) << 4);
    number = ((0xcccccccc & number) >> 2) | ((0x33333333 & number) << 2);
    number = ((0xaaaaaaaa & number) >> 1) | ((0x55555555 & number) << 1);
    return number;
}

/**
 * CUDA kernel function that reverses the order of bits in each element of the array.
 */
__global__ void bitreverse(void *data) {
    unsigned int *idata = (unsigned int*) data;
    idata[threadIdx.x] = bitreverse(idata[threadIdx.x]);
}

__global__ void add(int a,int b,int *c){
    *c=a+b;
}

/**
 * Host function that prepares data array and passes it to the CUDA kernel.
 */
int main(void) {
    void *d = NULL;
    int i;
    unsigned int idata[WORK_SIZE], odata[WORK_SIZE];

    for (i = 0; i < WORK_SIZE; i++)
        idata[i] = (unsigned int) i;

    CUDA_CHECK_RETURN(hipMalloc((void**) &d, sizeof(int) * WORK_SIZE));
    CUDA_CHECK_RETURN(hipMemcpy(d, idata, sizeof(int) * WORK_SIZE, hipMemcpyHostToDevice));

    hipLaunchKernelGGL(( bitreverse), dim3(1), dim3(WORK_SIZE), WORK_SIZE * sizeof(int), 0, d);

    CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete
    CUDA_CHECK_RETURN(hipGetLastError());
    CUDA_CHECK_RETURN(hipMemcpy(odata, d, sizeof(int) * WORK_SIZE, hipMemcpyDeviceToHost));

    for (i = 0; i < WORK_SIZE; i++)
        printf("Input value: %u, device output: %u, host output: %u\n",
               idata[i], odata[i], bitreverse(idata[i]));

    CUDA_CHECK_RETURN(hipFree((void*) d));
    CUDA_CHECK_RETURN(hipDeviceReset());

    int c;
    int *dev_c;
    HANDLE_ERROR(hipMalloc((void **)&dev_c,sizeof(int)));
    hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, 2,7,dev_c);
    HANDLE_ERROR(hipMemcpy(&c,dev_c,sizeof(int),hipMemcpyDeviceToHost));
    printf("2+7=%d\n",c);
    hipFree(dev_c);

    return 0;
}
c5ef7ebf6bbc9a5eb1dc7d7cb2dc624714d0af53.cu
/*
 * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */
#include <stdio.h>
#include <stdlib.h>

static const int WORK_SIZE = 256;

/**
 * This macro checks return value of the CUDA runtime call and exits
 * the application if the call failed.
 */
#define CUDA_CHECK_RETURN(value) { \
    cudaError_t _m_cudaStat = value; \
    if (_m_cudaStat != cudaSuccess) { \
        fprintf(stderr, "Error %s at line %d in file %s\n", \
                cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
        exit(1); \
    } }

__host__ __device__ unsigned int bitreverse(unsigned int number) {
    number = ((0xf0f0f0f0 & number) >> 4) | ((0x0f0f0f0f & number) << 4);
    number = ((0xcccccccc & number) >> 2) | ((0x33333333 & number) << 2);
    number = ((0xaaaaaaaa & number) >> 1) | ((0x55555555 & number) << 1);
    return number;
}

/**
 * CUDA kernel function that reverses the order of bits in each element of the array.
 */
__global__ void bitreverse(void *data) {
    unsigned int *idata = (unsigned int*) data;
    idata[threadIdx.x] = bitreverse(idata[threadIdx.x]);
}

__global__ void add(int a,int b,int *c){
    *c=a+b;
}

/**
 * Host function that prepares data array and passes it to the CUDA kernel.
 */
int main(void) {
    void *d = NULL;
    int i;
    unsigned int idata[WORK_SIZE], odata[WORK_SIZE];

    for (i = 0; i < WORK_SIZE; i++)
        idata[i] = (unsigned int) i;

    CUDA_CHECK_RETURN(cudaMalloc((void**) &d, sizeof(int) * WORK_SIZE));
    CUDA_CHECK_RETURN(cudaMemcpy(d, idata, sizeof(int) * WORK_SIZE, cudaMemcpyHostToDevice));

    bitreverse<<<1, WORK_SIZE, WORK_SIZE * sizeof(int)>>>(d);

    CUDA_CHECK_RETURN(cudaThreadSynchronize()); // Wait for the GPU launched work to complete
    CUDA_CHECK_RETURN(cudaGetLastError());
    CUDA_CHECK_RETURN(cudaMemcpy(odata, d, sizeof(int) * WORK_SIZE, cudaMemcpyDeviceToHost));

    for (i = 0; i < WORK_SIZE; i++)
        printf("Input value: %u, device output: %u, host output: %u\n",
               idata[i], odata[i], bitreverse(idata[i]));

    CUDA_CHECK_RETURN(cudaFree((void*) d));
    CUDA_CHECK_RETURN(cudaDeviceReset());

    int c;
    int *dev_c;
    HANDLE_ERROR(cudaMalloc((void **)&dev_c,sizeof(int)));
    add<<<1,1>>>(2,7,dev_c);
    HANDLE_ERROR(cudaMemcpy(&c,dev_c,sizeof(int),cudaMemcpyDeviceToHost));
    printf("2+7=%d\n",c);
    cudaFree(dev_c);

    return 0;
}
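Every .hip cell shown in this preview opens with the hipify banner comment and shares its hash stem with the paired .cu filename. A short validation sketch under those two assumptions; check_row is a made-up helper name, not part of any library.

    import os

    HIPIFY_BANNER = "// !!! This is a file automatically generated by hipify!!!"

    def check_row(row):
        # Same hash stem on both sides of the pair.
        same_stem = (os.path.splitext(row["hip_filename"])[0]
                     == os.path.splitext(row["cuda_filename"])[0])
        # The translated side carries the hipify banner as its first line.
        has_banner = row["hip_content"].lstrip().startswith(HIPIFY_BANNER)
        return same_stem and has_banner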
89256f543ee40074b0a9d45976daf8795f427fc8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Simple 3D volume renderer #ifndef _VOLUMERENDER_KERNEL_CU_ #define _VOLUMERENDER_KERNEL_CU_ #include <cutil_math.h> bool mallocVolumeArray = false; typedef unsigned int uint; typedef unsigned char uchar; hipArray *d_volumeArray = 0; hipStream_t renderStream; texture<float, 3, hipReadModeElementType> tex; // 3D texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __global__ void d_render(float *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, float voxelThreshold) { const int maxSteps = 500; const float tstep = 0.01f; const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f); const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; float u = (x / (float) imageW)*2.0f-1.0f; float v = (y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color __shared__ float sum[256]; __shared__ float subtractValue[256]; __shared__ float opacThreshold[256]; float t = tnear; int thrIdx = threadIdx.x; sum[thrIdx] = 0; subtractValue[thrIdx] = 0; opacThreshold[thrIdx] = 0.90f; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for(int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] coordinates float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, 
pos.z*0.5f+0.5f); sample *= 0.2f; if (sum[thrIdx]>0.0f) { subtractValue[thrIdx] += 0.01f; opacThreshold[thrIdx] -= 0.02f; } if (sum[thrIdx]==0.0f && sample > voxelThreshold) { sum[thrIdx] += sample; } else if (sum[threadIdx.x]>0.0f && sample - subtractValue[thrIdx] > 0.0f) { sum[thrIdx] += sample - subtractValue[thrIdx]; } if (sum[thrIdx] >= opacThreshold[thrIdx]) break; t += tstep; if (t > tfar) break; pos += step; } d_output[y*imageW + x] = sum[thrIdx]; } /*************************************************************************************************************************/ /*************************************** END OF KERNELS ***************************************************************/ /*************************************************************************************************************************/ //Initialization for MemcpyDeviceToDevice, for Processing AND Volume Rendering extern "C" void initRayCastCuda(void *d_volume, hipExtent volumeSize, hipMemcpyKind memcpyKind) { // create 3D array hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); if (!mallocVolumeArray) { hipStreamCreate(&renderStream); hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize); mallocVolumeArray = true; } // copy data to 3D array hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr(d_volume, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = memcpyKind; hipMemcpy3D(&copyParams); // set texture parameters tex.normalized = true; // access with normalized texture coordinates tex.filterMode = hipFilterModeLinear; // linear interpolation tex.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates tex.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture hipBindTextureToArray(tex, d_volumeArray, channelDesc); } extern "C" void freeVolumeBuffers() { hipFreeArray(d_volumeArray); mallocVolumeArray = false; } extern "C" void rayCast_kernel(dim3 gridSize, dim3 blockSize, float *d_output, int imageW, int imageH, float density, float brightness, float transferOffset, float transferScale, float voxelThreshold) { hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, renderStream, d_output, imageW, imageH, density, brightness, transferOffset, transferScale, voxelThreshold); } extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix); } #endif // #ifndef _VOLUMERENDER_KERNEL_CU_
89256f543ee40074b0a9d45976daf8795f427fc8.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Simple 3D volume renderer #ifndef _VOLUMERENDER_KERNEL_CU_ #define _VOLUMERENDER_KERNEL_CU_ #include <cutil_math.h> bool mallocVolumeArray = false; typedef unsigned int uint; typedef unsigned char uchar; cudaArray *d_volumeArray = 0; cudaStream_t renderStream; texture<float, 3, cudaReadModeElementType> tex; // 3D texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __global__ void d_render(float *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, float voxelThreshold) { const int maxSteps = 500; const float tstep = 0.01f; const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f); const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; float u = (x / (float) imageW)*2.0f-1.0f; float v = (y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color __shared__ float sum[256]; __shared__ float subtractValue[256]; __shared__ float opacThreshold[256]; float t = tnear; int thrIdx = threadIdx.x; sum[thrIdx] = 0; subtractValue[thrIdx] = 0; opacThreshold[thrIdx] = 0.90f; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for(int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] coordinates float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f); sample *= 0.2f; if (sum[thrIdx]>0.0f) { subtractValue[thrIdx] += 0.01f; 
opacThreshold[thrIdx] -= 0.02f; } if (sum[thrIdx]==0.0f && sample > voxelThreshold) { sum[thrIdx] += sample; } else if (sum[threadIdx.x]>0.0f && sample - subtractValue[thrIdx] > 0.0f) { sum[thrIdx] += sample - subtractValue[thrIdx]; } if (sum[thrIdx] >= opacThreshold[thrIdx]) break; t += tstep; if (t > tfar) break; pos += step; } d_output[y*imageW + x] = sum[thrIdx]; } /*************************************************************************************************************************/ /*************************************** END OF KERNELS ***************************************************************/ /*************************************************************************************************************************/ //Initialization for MemcpyDeviceToDevice, for Processing AND Volume Rendering extern "C" void initRayCastCuda(void *d_volume, cudaExtent volumeSize, cudaMemcpyKind memcpyKind) { // create 3D array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); if (!mallocVolumeArray) { cudaStreamCreate(&renderStream); cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize); mallocVolumeArray = true; } // copy data to 3D array cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr(d_volume, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = memcpyKind; cudaMemcpy3D(&copyParams); // set texture parameters tex.normalized = true; // access with normalized texture coordinates tex.filterMode = cudaFilterModeLinear; // linear interpolation tex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates tex.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture cudaBindTextureToArray(tex, d_volumeArray, channelDesc); } extern "C" void freeVolumeBuffers() { cudaFreeArray(d_volumeArray); mallocVolumeArray = false; } extern "C" void rayCast_kernel(dim3 gridSize, dim3 blockSize, float *d_output, int imageW, int imageH, float density, float brightness, float transferOffset, float transferScale, float voxelThreshold) { d_render<<<gridSize, blockSize, 0, renderStream>>>( d_output, imageW, imageH, density, brightness, transferOffset, transferScale, voxelThreshold); } extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix); } #endif // #ifndef _VOLUMERENDER_KERNEL_CU_
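Note on this pair: the volume renderer adds two wrinkles beyond simple renames — a launch that carries an explicit stream (d_render<<<grid, block, 0, renderStream>>> maps to hipLaunchKernelGGL(d_render, dim3(gridSize), dim3(blockSize), 0, renderStream, ...)) and the legacy texture-reference API, which maps one-to-one (cudaArray/cudaBindTextureToArray/cudaMemcpyToSymbol become hipArray/hipBindTextureToArray/hipMemcpyToSymbol). The sketch below isolates the stream-launch part only; the kernel scaleBuf is an illustrative stand-in, not part of the renderer.

#include <hip/hip_runtime.h>

__global__ void scaleBuf(float *v, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= s;
}

int main() {
    const int n = 1 << 20;
    float *d = nullptr;
    hipStream_t stream;
    hipStreamCreate(&stream);                        // CUDA: cudaStreamCreate
    hipMalloc((void**)&d, n * sizeof(float));
    hipMemsetAsync(d, 0, n * sizeof(float), stream);
    // CUDA: scaleBuf<<<(n + 255) / 256, 256, 0, stream>>>(d, 2.0f, n);
    hipLaunchKernelGGL(scaleBuf, dim3((n + 255) / 256), dim3(256), 0, stream, d, 2.0f, n);
    hipStreamSynchronize(stream);
    hipFree(d);
    hipStreamDestroy(stream);
    return 0;
}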
e3bcf3ebc9334f012d870eb1d8329d0d314128e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _TEMPLATE_KERNEL_H_ #define _TEMPLATE_KERNEL_H_ #include <stdio.h> #define SDATA( index) CUT_BANK_CHECKER(sdata, index) //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// __global__ void testKernel( float* g_idata, float* g_odata) { // shared memory // the size is determined by the host application extern __shared__ float sdata[]; // access thread id const unsigned int tid = threadIdx.x; // access number of threads in this block const unsigned int num_threads = blockDim.x; // read in input data from global memory // use the bank checker macro to check for bank conflicts during host // emulation SDATA(tid) = g_idata[tid]; __syncthreads(); // perform some computations SDATA(tid) = (float) num_threads * SDATA( tid); __syncthreads(); // write data to global memory g_odata[tid] = SDATA(tid); } #endif // #ifndef _TEMPLATE_KERNEL_H_
e3bcf3ebc9334f012d870eb1d8329d0d314128e1.cu
#ifndef _TEMPLATE_KERNEL_H_ #define _TEMPLATE_KERNEL_H_ #include <stdio.h> #define SDATA( index) CUT_BANK_CHECKER(sdata, index) //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// __global__ void testKernel( float* g_idata, float* g_odata) { // shared memory // the size is determined by the host application extern __shared__ float sdata[]; // access thread id const unsigned int tid = threadIdx.x; // access number of threads in this block const unsigned int num_threads = blockDim.x; // read in input data from global memory // use the bank checker macro to check for bank conflicts during host // emulation SDATA(tid) = g_idata[tid]; __syncthreads(); // perform some computations SDATA(tid) = (float) num_threads * SDATA( tid); __syncthreads(); // write data to global memory g_odata[tid] = SDATA(tid); } #endif // #ifndef _TEMPLATE_KERNEL_H_
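Note on this pair: the file contains no runtime API calls, so hipify only adds the banner comment and #include "hip/hip_runtime.h"; device-side constructs (__global__, extern __shared__, __syncthreads, threadIdx/blockDim) pass through unchanged. The SDATA/CUT_BANK_CHECKER macro comes from the retired cutil headers and is not defined here. A minimal sketch of the dynamic-shared-memory pattern this kernel relies on, with the byte count supplied at launch time (names are illustrative):

#include <hip/hip_runtime.h>

__global__ void scaleViaShared(const float *in, float *out) {
    extern __shared__ float tile[];   // size is fixed by the launch, not by the kernel
    unsigned int tid = threadIdx.x;
    tile[tid] = in[tid];
    __syncthreads();
    out[tid] = tile[tid] * (float)blockDim.x;
}

int main() {
    const int n = 256;
    float *in = nullptr, *out = nullptr;
    hipMalloc((void**)&in, n * sizeof(float));
    hipMalloc((void**)&out, n * sizeof(float));
    hipMemset(in, 0, n * sizeof(float));
    // Third launch parameter = dynamic shared memory in bytes (CUDA: kernel<<<1, n, n*sizeof(float)>>>).
    hipLaunchKernelGGL(scaleViaShared, dim3(1), dim3(n), n * sizeof(float), 0, in, out);
    hipDeviceSynchronize();
    hipFree(in);
    hipFree(out);
    return 0;
}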
26889d0db1c4b768748884f2910507446b52fe11.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2014 by Joern Dinkla, www.dinkla.com, All rights reserved. * * See the LICENSE file in the root directory. */ #include <algorithm> // CUDA 6.5 requires this for std::min #include "Rectangle.h" #include "Mandelbrot.h" #include "Extent.h" #include "CudaUtilities.h" using thrust::raw_pointer_cast; using namespace std; __global__ void mandelbrot_kernel(Extent ext, int* dest, const int max_iter, Rectangle r) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int i = ext.checked_index(x, y); if (i >= 0) { const float x0 = scale(x, ext.get_width(), r.x0, r.x1); const float y0 = scale(y, ext.get_height(), r.y0, r.y1); dest[i] = mandelbrot(x0, y0, max_iter); } } void cuda_mandelbrot( CudaExecConfig& cnf, Extent& ext, thrust::device_vector<int>& d, const int max_iter, Rectangle r) { dim3 g = cnf.get_grid(); dim3 b = cnf.get_block(); hipLaunchKernelGGL(( mandelbrot_kernel), dim3(g), dim3(b), 0, 0, ext, thrust::raw_pointer_cast(&d[0]), max_iter, r); } void cuda_mandelbrot() { CudaTimer timer; const int max_iter = 1000; Rectangle r(-2.5, 1.0, -1.0, 1.0); const int s = 1024 * 10; Extent ext(s, s); CudaExecConfig cnf(ext, dim3(32, 4, 1)); dim3 g = cnf.get_grid(); dim3 b = cnf.get_block(); thrust::device_vector<int> d(ext.get_number_of_elems()); hipDeviceSynchronize(); timer.start(); mandelbrot_kernel << <g, b >> >(ext, raw_pointer_cast(&d[0]), max_iter, r); hipDeviceSynchronize(); timer.stop(); check_cuda(); cout << "Mandelbrot: " << timer.delta() << endl; } void bench_mandelbrot_single( CudaTimer& timer, Extent& ext, thrust::device_vector<int>& d, dim3 thr, const int max_iter, Rectangle r ) { timer.start(); CudaExecConfig cnf(ext, thr); dim3 g = cnf.get_grid(); dim3 b = cnf.get_block(); mandelbrot_kernel << <g, b >> >(ext, raw_pointer_cast(&d[0]), max_iter, r); hipDeviceSynchronize(); check_cuda(); timer.stop(); cout << thr.x << ", " << thr.y << ";" << max_iter << ";" << timer.delta() << endl; }
26889d0db1c4b768748884f2910507446b52fe11.cu
/* * Copyright (c) 2014 by Joern Dinkla, www.dinkla.com, All rights reserved. * * See the LICENSE file in the root directory. */ #include <algorithm> // CUDA 6.5 requires this for std::min #include "Rectangle.h" #include "Mandelbrot.h" #include "Extent.h" #include "CudaUtilities.h" using thrust::raw_pointer_cast; using namespace std; __global__ void mandelbrot_kernel(Extent ext, int* dest, const int max_iter, Rectangle r) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int i = ext.checked_index(x, y); if (i >= 0) { const float x0 = scale(x, ext.get_width(), r.x0, r.x1); const float y0 = scale(y, ext.get_height(), r.y0, r.y1); dest[i] = mandelbrot(x0, y0, max_iter); } } void cuda_mandelbrot( CudaExecConfig& cnf, Extent& ext, thrust::device_vector<int>& d, const int max_iter, Rectangle r) { dim3 g = cnf.get_grid(); dim3 b = cnf.get_block(); mandelbrot_kernel<<<g, b>>>(ext, thrust::raw_pointer_cast(&d[0]), max_iter, r); } void cuda_mandelbrot() { CudaTimer timer; const int max_iter = 1000; Rectangle r(-2.5, 1.0, -1.0, 1.0); const int s = 1024 * 10; Extent ext(s, s); CudaExecConfig cnf(ext, dim3(32, 4, 1)); dim3 g = cnf.get_grid(); dim3 b = cnf.get_block(); thrust::device_vector<int> d(ext.get_number_of_elems()); cudaDeviceSynchronize(); timer.start(); mandelbrot_kernel << <g, b >> >(ext, raw_pointer_cast(&d[0]), max_iter, r); cudaDeviceSynchronize(); timer.stop(); check_cuda(); cout << "Mandelbrot: " << timer.delta() << endl; } void bench_mandelbrot_single( CudaTimer& timer, Extent& ext, thrust::device_vector<int>& d, dim3 thr, const int max_iter, Rectangle r ) { timer.start(); CudaExecConfig cnf(ext, thr); dim3 g = cnf.get_grid(); dim3 b = cnf.get_block(); mandelbrot_kernel << <g, b >> >(ext, raw_pointer_cast(&d[0]), max_iter, r); cudaDeviceSynchronize(); check_cuda(); timer.stop(); cout << thr.x << ", " << thr.y << ";" << max_iter << ";" << timer.delta() << endl; }
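Note on this pair: the translation is incomplete in an instructive way. Only the launch written as mandelbrot_kernel<<<g, b>>>(...) was rewritten to hipLaunchKernelGGL; the two launches written with spaced chevrons (mandelbrot_kernel << <g, b >> >(...)) were left untouched, presumably because they defeated the pattern match — hip-clang generally still accepts the triple-chevron syntax, so they compile anyway. cudaDeviceSynchronize maps to hipDeviceSynchronize. The project's CudaTimer class is not shown here; the sketch below times a kernel with HIP events instead, as one hedged alternative (kernel and variable names are illustrative).

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void busyWork(float *v, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] = v[i] * 0.5f + 1.0f;
}

int main() {
    const int n = 1 << 22;
    float *d = nullptr;
    hipMalloc((void**)&d, n * sizeof(float));
    hipEvent_t start, stop;                      // CUDA: cudaEvent_t / cudaEventCreate / cudaEventRecord
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    hipLaunchKernelGGL(busyWork, dim3((n + 255) / 256), dim3(256), 0, 0, d, n);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);
    printf("kernel time: %f ms\n", ms);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d);
    return 0;
}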
41bfd885564f79055b932de71d9f75dfbee9492d.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/JitLoops.cuh> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Math.cuh> #include <ATen/native/hip/jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/hip/HIPMathCompat.h> #include <c10/util/complex.h> namespace at { namespace native { namespace { const char scaled_modified_bessel_k0_name[] = "scaled_modified_bessel_k0_forward"; void scaled_modified_bessel_k0_kernel_cuda(TensorIteratorBase& iterator) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() { jitted_gpu_kernel<scaled_modified_bessel_k0_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k0_string); }); #else AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() { gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t { return scaled_modified_bessel_k0_forward(a); }); }); #endif // AT_USE_JITERATOR() } } REGISTER_DISPATCH(special_scaled_modified_bessel_k0_stub, &scaled_modified_bessel_k0_kernel_cuda); } // namespace native } // namespace at
41bfd885564f79055b932de71d9f75dfbee9492d.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/JitLoops.cuh> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Math.cuh> #include <ATen/native/cuda/jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/cuda/CUDAMathCompat.h> #include <c10/util/complex.h> namespace at { namespace native { namespace { const char scaled_modified_bessel_k0_name[] = "scaled_modified_bessel_k0_forward"; void scaled_modified_bessel_k0_kernel_cuda(TensorIteratorBase& iterator) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() { jitted_gpu_kernel<scaled_modified_bessel_k0_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k0_string); }); #else AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() { gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t { return scaled_modified_bessel_k0_forward(a); }); }); #endif // AT_USE_JITERATOR() } } REGISTER_DISPATCH(special_scaled_modified_bessel_k0_stub, &scaled_modified_bessel_k0_kernel_cuda); } // namespace native } // namespace at
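Note on this pair: no kernels are launched directly in this file, so the conversion is purely include-path remapping (ATen/native/cuda → ATen/native/hip, c10/cuda/CUDAMathCompat.h → c10/hip/HIPMathCompat.h); the AT_DISPATCH / gpu_kernel / jitted_gpu_kernel machinery is shared between back ends. The sketch below is only a loose, standalone analogue of the element-wise-dispatch idea those helpers implement — elementwiseApply and HalveOp are inventions for illustration, not ATen API, and the operation is a placeholder rather than the Bessel evaluation.

#include <hip/hip_runtime.h>

// Apply a functor element-wise; ATen's gpu_kernel does this generically over a
// TensorIterator, this shows only the core idea.
template <typename F>
__global__ void elementwiseApply(const float *in, float *out, int n, F f) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = f(in[i]);
}

struct HalveOp {
    __device__ float operator()(float x) const { return 0.5f * x; }  // stand-in for the real math
};

int main() {
    const int n = 1024;
    float *in = nullptr, *out = nullptr;
    hipMalloc((void**)&in, n * sizeof(float));
    hipMalloc((void**)&out, n * sizeof(float));
    hipMemset(in, 0, n * sizeof(float));
    hipLaunchKernelGGL(elementwiseApply<HalveOp>, dim3((n + 255) / 256), dim3(256), 0, 0,
                       in, out, n, HalveOp{});
    hipDeviceSynchronize();
    hipFree(in);
    hipFree(out);
    return 0;
}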
ca7f85dbbcc9678ce343003201227dd2f14a4854.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/hip/UpSample.cuh> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void upsample_nearest2d_out_frame( const int n, const PackedTensorAccessor<scalar_t, 4> idata, PackedTensorAccessor<scalar_t, 4> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int height1 = idata.size(2); const int width1 = idata.size(3); const int height2 = odata.size(2); const int width2 = odata.size(3); const float height_scale = (float)height1 / (float)height2; const float width_scale = (float)width1 / (float)width2; if (index < n) { const int w2 = index % width2; // 0:width2-1 const int h2 = index / width2; // 0:height2-1 // special case: just copy if (height1 == height2 && width1 == width2) { const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = idata[n][c][h1][w1]; odata[n][c][h2][w2] = val; } } return; } // const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, height1); const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, width1); for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = idata[n][c][h1][w1]; odata[n][c][h2][w2] = val; } } } } // Backward operation template <typename scalar_t, typename accscalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void upsample_nearest2d_backward_out_frame( const int n, PackedTensorAccessor<scalar_t, 4> idata, const PackedTensorAccessor<scalar_t, 4> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int height1 = idata.size(2); const int width1 = idata.size(3); const int height2 = odata.size(2); const int width2 = odata.size(3); const float height_scale = (float)height1 / (float)height2; const float width_scale = (float)width1 / (float)width2; if (index < n) { const int w2 = index % width2; // 0:width2-1 const int h2 = index / width2; // 0:height2-1 // special case: just copy if (height1 == height2 && width1 == width2) { const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = odata[n][c][h2][w2]; idata[n][c][h1][w1] = val; } } return; } // const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, height1); const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, width1); for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t d2val = odata[n][c][h2][w2]; atomicAdd(&idata[n][c][h1][w1], d2val); } } } } static void upsample_nearest2d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef output_size) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU( "upsample_nearest2d_out_cuda_template", {input_arg, output_arg}); AT_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", output_size.size()); int output_height = output_size[0]; int output_width = output_size[1]; 
int nbatch = input.size(0); int channels = input.size(1); int input_height = input.size(2); int input_width = input.size(3); upsample_2d_shape_check( input, Tensor(), nbatch, channels, input_height, input_width, output_height, output_width); AT_ASSERT( input_height > 0 && input_width > 0 && output_height > 0 && output_width > 0); output.resize_({input.size(0), input.size(1), output_height, output_width}); output.zero_(); const int num_kernels = output_height * output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_nearest2d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.packed_accessor<scalar_t, 4>(); auto odata = output.packed_accessor<scalar_t, 4>(); hipLaunchKernelGGL(( upsample_nearest2d_out_frame<scalar_t, accscalar_t>) , dim3(cuda::ATenCeilDiv(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, idata, odata); }); AT_CUDA_CHECK(hipGetLastError()); } static void upsample_nearest2d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( "upsample_nearest2d_backward_out_cuda", {grad_output_arg, grad_input_arg}); AT_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", output_size.size()); AT_CHECK( input_size.size() == 4, "It is expected input_size equals to 4, but got size ", input_size.size()); int output_height = output_size[0]; int output_width = output_size[1]; int nbatch = input_size[0]; int channels = input_size[1]; int input_height = input_size[2]; int input_width = input_size[3]; upsample_2d_shape_check( Tensor(), grad_output_, nbatch, channels, input_height, input_width, output_height, output_width); Tensor grad_output = grad_output_.contiguous(); grad_input.resize_({nbatch, channels, input_height, input_width}); grad_input.zero_(); const int num_kernels = output_height * output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_nearest2d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.packed_accessor<scalar_t, 4>(); auto odata = grad_output.packed_accessor<scalar_t, 4>(); hipLaunchKernelGGL(( upsample_nearest2d_backward_out_frame<scalar_t, accscalar_t>) , dim3(cuda::ATenCeilDiv(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, idata, odata); }); AT_CUDA_CHECK(hipGetLastError()); } } // namespace Tensor& upsample_nearest2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size) { upsample_nearest2d_out_cuda_template(output, input, output_size); return output; } Tensor upsample_nearest2d_cuda(const Tensor& input, IntArrayRef output_size) { Tensor output = at::empty_like(input); upsample_nearest2d_out_cuda_template(output, input, output_size); return output; } Tensor& upsample_nearest2d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size) { upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size); return grad_input; } Tensor 
upsample_nearest2d_backward_cuda( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size) { Tensor grad_input = at::empty_like(grad_output); upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size); return grad_input; } } // namespace native } // namespace at
ca7f85dbbcc9678ce343003201227dd2f14a4854.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/cuda/UpSample.cuh> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void upsample_nearest2d_out_frame( const int n, const PackedTensorAccessor<scalar_t, 4> idata, PackedTensorAccessor<scalar_t, 4> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int height1 = idata.size(2); const int width1 = idata.size(3); const int height2 = odata.size(2); const int width2 = odata.size(3); const float height_scale = (float)height1 / (float)height2; const float width_scale = (float)width1 / (float)width2; if (index < n) { const int w2 = index % width2; // 0:width2-1 const int h2 = index / width2; // 0:height2-1 // special case: just copy if (height1 == height2 && width1 == width2) { const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = idata[n][c][h1][w1]; odata[n][c][h2][w2] = val; } } return; } // const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, height1); const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, width1); for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = idata[n][c][h1][w1]; odata[n][c][h2][w2] = val; } } } } // Backward operation template <typename scalar_t, typename accscalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void upsample_nearest2d_backward_out_frame( const int n, PackedTensorAccessor<scalar_t, 4> idata, const PackedTensorAccessor<scalar_t, 4> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int height1 = idata.size(2); const int width1 = idata.size(3); const int height2 = odata.size(2); const int width2 = odata.size(3); const float height_scale = (float)height1 / (float)height2; const float width_scale = (float)width1 / (float)width2; if (index < n) { const int w2 = index % width2; // 0:width2-1 const int h2 = index / width2; // 0:height2-1 // special case: just copy if (height1 == height2 && width1 == width2) { const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = odata[n][c][h2][w2]; idata[n][c][h1][w1] = val; } } return; } // const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, height1); const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, width1); for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t d2val = odata[n][c][h2][w2]; atomicAdd(&idata[n][c][h1][w1], d2val); } } } } static void upsample_nearest2d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef output_size) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU( "upsample_nearest2d_out_cuda_template", {input_arg, output_arg}); AT_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", output_size.size()); int output_height = output_size[0]; int output_width = output_size[1]; int nbatch = input.size(0); int channels = input.size(1); int input_height = 
input.size(2); int input_width = input.size(3); upsample_2d_shape_check( input, Tensor(), nbatch, channels, input_height, input_width, output_height, output_width); AT_ASSERT( input_height > 0 && input_width > 0 && output_height > 0 && output_width > 0); output.resize_({input.size(0), input.size(1), output_height, output_width}); output.zero_(); const int num_kernels = output_height * output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_nearest2d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.packed_accessor<scalar_t, 4>(); auto odata = output.packed_accessor<scalar_t, 4>(); upsample_nearest2d_out_frame<scalar_t, accscalar_t> <<<cuda::ATenCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, idata, odata); }); AT_CUDA_CHECK(cudaGetLastError()); } static void upsample_nearest2d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( "upsample_nearest2d_backward_out_cuda", {grad_output_arg, grad_input_arg}); AT_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", output_size.size()); AT_CHECK( input_size.size() == 4, "It is expected input_size equals to 4, but got size ", input_size.size()); int output_height = output_size[0]; int output_width = output_size[1]; int nbatch = input_size[0]; int channels = input_size[1]; int input_height = input_size[2]; int input_width = input_size[3]; upsample_2d_shape_check( Tensor(), grad_output_, nbatch, channels, input_height, input_width, output_height, output_width); Tensor grad_output = grad_output_.contiguous(); grad_input.resize_({nbatch, channels, input_height, input_width}); grad_input.zero_(); const int num_kernels = output_height * output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_nearest2d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.packed_accessor<scalar_t, 4>(); auto odata = grad_output.packed_accessor<scalar_t, 4>(); upsample_nearest2d_backward_out_frame<scalar_t, accscalar_t> <<<cuda::ATenCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, idata, odata); }); AT_CUDA_CHECK(cudaGetLastError()); } } // namespace Tensor& upsample_nearest2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size) { upsample_nearest2d_out_cuda_template(output, input, output_size); return output; } Tensor upsample_nearest2d_cuda(const Tensor& input, IntArrayRef output_size) { Tensor output = at::empty_like(input); upsample_nearest2d_out_cuda_template(output, input, output_size); return output; } Tensor& upsample_nearest2d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size) { upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size); return grad_input; } Tensor upsample_nearest2d_backward_cuda( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size) { Tensor grad_input = at::empty_like(grad_output); 
upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size); return grad_input; } } // namespace native } // namespace at
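Note on this pair: it shows the larger surface hipify touches in PyTorch sources — headers (ATen/cuda → ATen/hip), the stream type (cudaStream_t → hipStream_t), stream acquisition (at::cuda::getCurrentCUDAStream() → at::hip::getCurrentHIPStreamMasqueradingAsCUDA()), the post-launch error check (cudaGetLastError → hipGetLastError), and chevron launches that carry a stream; the existing __HIP_PLATFORM_HCC__ guards are left alone. A small standalone sketch of the grid-sizing, stream launch, and error-check pattern (the kernel fillValue and sizes are illustrative):

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void fillValue(float *out, float value, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value;
}

int main() {
    const int numKernels = 640 * 480;                          // analogous to num_kernels = H*W above
    const int threads = 256;
    const int blocks = (numKernels + threads - 1) / threads;   // same rounding as ATenCeilDiv
    float *d = nullptr;
    hipStream_t stream;
    hipStreamCreate(&stream);
    hipMalloc((void**)&d, numKernels * sizeof(float));
    hipLaunchKernelGGL(fillValue, dim3(blocks), dim3(threads), 0, stream, d, 1.0f, numKernels);
    hipError_t err = hipGetLastError();                        // CUDA: AT_CUDA_CHECK(cudaGetLastError())
    if (err != hipSuccess) printf("launch failed: %s\n", hipGetErrorString(err));
    hipStreamSynchronize(stream);
    hipFree(d);
    hipStreamDestroy(stream);
    return 0;
}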
aec69d903b52d96b65c3c15bd10f56c598bda7b4.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "encoder_decoder.h" #include "gpu/mblas/matrix.h" #include "common/god.h" using namespace std; namespace GPU { //////////////////////////////////////////// std::string EncoderDecoderState::Debug() const { return states_.Debug(); } mblas::Matrix& EncoderDecoderState::GetStates() { return states_; } mblas::Matrix& EncoderDecoderState::GetEmbeddings() { return embeddings_; } const mblas::Matrix& EncoderDecoderState::GetStates() const { return states_; } const mblas::Matrix& EncoderDecoderState::GetEmbeddings() const { return embeddings_; } //////////////////////////////////////////// EncoderDecoder::EncoderDecoder(const std::string& name, const YAML::Node& config, size_t tab, const Weights& model) : Scorer(name, config, tab), model_(model), encoder_(new Encoder(model_)), decoder_(new Decoder(model_)) {} void EncoderDecoder::Score(const State& in, BaseMatrix& prob, State& out) { const EDState& edIn = in.get<EDState>(); EDState& edOut = out.get<EDState>(); mblas::Matrix &probCast = static_cast<mblas::Matrix&>(prob); decoder_->MakeStep(edOut.GetStates(), probCast, edIn.GetStates(), edIn.GetEmbeddings(), SourceContext_); } State* EncoderDecoder::NewState() { return new EDState(); } void EncoderDecoder::BeginSentenceState(State& state) { EDState& edState = state.get<EDState>(); decoder_->EmptyState(edState.GetStates(), SourceContext_, 1); decoder_->EmptyEmbedding(edState.GetEmbeddings(), 1); } void EncoderDecoder::SetSource(const Sentence& source) { //cerr << "SetSource" << endl; encoder_->GetContext(source.GetWords(tab_), SourceContext_); } void EncoderDecoder::AssembleBeamState(const State& in, const Beam& beam, State& out) { std::vector<size_t> beamWords; std::vector<size_t> beamStateIds; for(auto h : beam) { beamWords.push_back(h->GetWord()); beamStateIds.push_back(h->GetPrevStateIndex()); } const EDState& edIn = in.get<EDState>(); EDState& edOut = out.get<EDState>(); mblas::Assemble(edOut.GetStates(), edIn.GetStates(), beamStateIds); decoder_->Lookup(edOut.GetEmbeddings(), beamWords); } void EncoderDecoder::GetAttention(mblas::Matrix& Attention) { decoder_->GetAttention(Attention); } size_t EncoderDecoder::GetVocabSize() const { return decoder_->GetVocabSize(); } BaseMatrix *EncoderDecoder::CreateMatrix() { mblas::Matrix *ret = new mblas::Matrix(); return ret; } //////////////////////////////////////////// EncoderDecoderLoader::EncoderDecoderLoader(const std::string name, const YAML::Node& config) : Loader(name, config) {} void EncoderDecoderLoader::Load() { std::string path = Get<std::string>("path"); auto devices = God::Get<std::vector<size_t>>("devices"); ThreadPool devicePool(devices.size()); weights_.resize(devices.size()); size_t i = 0; for(auto d : devices) { devicePool.enqueue([i, d, &path, this] { LOG(info) << "Loading model " << path << " onto gpu" << d; hipSetDevice(d); weights_[i].reset(new Weights(path, d)); }); ++i; } } ScorerPtr EncoderDecoderLoader::NewScorer(size_t taskId) { size_t i = taskId % weights_.size(); size_t d = weights_[i]->GetDevice(); hipSetDevice(d); size_t tab = Has("tab") ? Get<size_t>("tab") : 0; return ScorerPtr(new EncoderDecoder(name_, config_, tab, *weights_[i])); } }
aec69d903b52d96b65c3c15bd10f56c598bda7b4.cu
#include <iostream> #include "encoder_decoder.h" #include "gpu/mblas/matrix.h" #include "common/god.h" using namespace std; namespace GPU { //////////////////////////////////////////// std::string EncoderDecoderState::Debug() const { return states_.Debug(); } mblas::Matrix& EncoderDecoderState::GetStates() { return states_; } mblas::Matrix& EncoderDecoderState::GetEmbeddings() { return embeddings_; } const mblas::Matrix& EncoderDecoderState::GetStates() const { return states_; } const mblas::Matrix& EncoderDecoderState::GetEmbeddings() const { return embeddings_; } //////////////////////////////////////////// EncoderDecoder::EncoderDecoder(const std::string& name, const YAML::Node& config, size_t tab, const Weights& model) : Scorer(name, config, tab), model_(model), encoder_(new Encoder(model_)), decoder_(new Decoder(model_)) {} void EncoderDecoder::Score(const State& in, BaseMatrix& prob, State& out) { const EDState& edIn = in.get<EDState>(); EDState& edOut = out.get<EDState>(); mblas::Matrix &probCast = static_cast<mblas::Matrix&>(prob); decoder_->MakeStep(edOut.GetStates(), probCast, edIn.GetStates(), edIn.GetEmbeddings(), SourceContext_); } State* EncoderDecoder::NewState() { return new EDState(); } void EncoderDecoder::BeginSentenceState(State& state) { EDState& edState = state.get<EDState>(); decoder_->EmptyState(edState.GetStates(), SourceContext_, 1); decoder_->EmptyEmbedding(edState.GetEmbeddings(), 1); } void EncoderDecoder::SetSource(const Sentence& source) { //cerr << "SetSource" << endl; encoder_->GetContext(source.GetWords(tab_), SourceContext_); } void EncoderDecoder::AssembleBeamState(const State& in, const Beam& beam, State& out) { std::vector<size_t> beamWords; std::vector<size_t> beamStateIds; for(auto h : beam) { beamWords.push_back(h->GetWord()); beamStateIds.push_back(h->GetPrevStateIndex()); } const EDState& edIn = in.get<EDState>(); EDState& edOut = out.get<EDState>(); mblas::Assemble(edOut.GetStates(), edIn.GetStates(), beamStateIds); decoder_->Lookup(edOut.GetEmbeddings(), beamWords); } void EncoderDecoder::GetAttention(mblas::Matrix& Attention) { decoder_->GetAttention(Attention); } size_t EncoderDecoder::GetVocabSize() const { return decoder_->GetVocabSize(); } BaseMatrix *EncoderDecoder::CreateMatrix() { mblas::Matrix *ret = new mblas::Matrix(); return ret; } //////////////////////////////////////////// EncoderDecoderLoader::EncoderDecoderLoader(const std::string name, const YAML::Node& config) : Loader(name, config) {} void EncoderDecoderLoader::Load() { std::string path = Get<std::string>("path"); auto devices = God::Get<std::vector<size_t>>("devices"); ThreadPool devicePool(devices.size()); weights_.resize(devices.size()); size_t i = 0; for(auto d : devices) { devicePool.enqueue([i, d, &path, this] { LOG(info) << "Loading model " << path << " onto gpu" << d; cudaSetDevice(d); weights_[i].reset(new Weights(path, d)); }); ++i; } } ScorerPtr EncoderDecoderLoader::NewScorer(size_t taskId) { size_t i = taskId % weights_.size(); size_t d = weights_[i]->GetDevice(); cudaSetDevice(d); size_t tab = Has("tab") ? Get<size_t>("tab") : 0; return ScorerPtr(new EncoderDecoder(name_, config_, tab, *weights_[i])); } }
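Note on this pair: the only runtime calls here are cudaSetDevice → hipSetDevice, issued once inside each ThreadPool task so every model copy loads onto its own GPU, and again in NewScorer before a scorer is handed out; the rest is host-side C++ and passes through unchanged. A minimal sketch of the set-device-per-worker idea using plain std::thread instead of the project's ThreadPool (the structure and names are illustrative):

#include <hip/hip_runtime.h>
#include <cstdio>
#include <thread>
#include <vector>

void loadOnDevice(int device) {
    hipSetDevice(device);                 // CUDA: cudaSetDevice(device)
    float *buf = nullptr;
    hipMalloc((void**)&buf, 1 << 20);     // stand-in for "load weights onto this GPU"
    printf("allocated scratch on device %d\n", device);
    hipFree(buf);
}

int main() {
    int count = 0;
    hipGetDeviceCount(&count);            // CUDA: cudaGetDeviceCount
    std::vector<std::thread> workers;
    for (int d = 0; d < count; ++d) workers.emplace_back(loadOnDevice, d);
    for (auto &w : workers) w.join();
    return 0;
}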
7b78e7e3869a0d84216cec83d31f129337fcd004.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> void initWith(float num, float *a, int N) { for(int i = 0; i < N; ++i) { a[i] = num; } } __global__ void addVectorsInto(float *result, float *a, float *b, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < N; i += stride) { result[i] = a[i] + b[i]; } } void checkElementsAre(float target, float *vector, int N) { for(int i = 0; i < N; i++) { if(vector[i] != target) { printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target); exit(1); } } printf("Success! All values calculated correctly.\n"); } int main() { /* * * */ int deviceId; int numberOfSMs; hipGetDevice(&deviceId); hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId); printf("Device ID: %d\tNumber of SMs: %d\n", deviceId, numberOfSMs); const int N = 2<<24; size_t size = N * sizeof(float); float *a; float *b; float *c; hipMallocManaged(&a, size); hipMallocManaged(&b, size); hipMallocManaged(&c, size); initWith(3, a, N); initWith(4, b, N); initWith(0, c, N); size_t threadsPerBlock; size_t numberOfBlocks; threadsPerBlock = 256; numberOfBlocks = 32 * numberOfSMs; hipError_t addVectorsErr; hipError_t asyncErr; hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N); addVectorsErr = hipGetLastError(); if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr)); asyncErr = hipDeviceSynchronize(); if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr)); checkElementsAre(7, c, N); hipFree(a); hipFree(b); hipFree(c); }
7b78e7e3869a0d84216cec83d31f129337fcd004.cu
#include <stdio.h> void initWith(float num, float *a, int N) { for(int i = 0; i < N; ++i) { a[i] = num; } } __global__ void addVectorsInto(float *result, float *a, float *b, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < N; i += stride) { result[i] = a[i] + b[i]; } } void checkElementsAre(float target, float *vector, int N) { for(int i = 0; i < N; i++) { if(vector[i] != target) { printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target); exit(1); } } printf("Success! All values calculated correctly.\n"); } int main() { /* * To reduce page faults and improve performance, * add asynchronous memory prefetching. */ int deviceId; int numberOfSMs; cudaGetDevice(&deviceId); cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId); printf("Device ID: %d\tNumber of SMs: %d\n", deviceId, numberOfSMs); const int N = 2<<24; size_t size = N * sizeof(float); float *a; float *b; float *c; cudaMallocManaged(&a, size); cudaMallocManaged(&b, size); cudaMallocManaged(&c, size); initWith(3, a, N); initWith(4, b, N); initWith(0, c, N); size_t threadsPerBlock; size_t numberOfBlocks; threadsPerBlock = 256; numberOfBlocks = 32 * numberOfSMs; cudaError_t addVectorsErr; cudaError_t asyncErr; addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N); addVectorsErr = cudaGetLastError(); if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr)); asyncErr = cudaDeviceSynchronize(); if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr)); checkElementsAre(7, c, N); cudaFree(a); cudaFree(b); cudaFree(c); }
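Note on this pair: the comment in main asks for asynchronous memory prefetching to be added to cut page faults; neither version does it yet. The mappings shown are cudaMallocManaged → hipMallocManaged and cudaDeviceGetAttribute(..., cudaDevAttrMultiProcessorCount, ...) → hipDeviceGetAttribute(..., hipDeviceAttributeMultiprocessorCount, ...). The sketch below is one hedged way the requested prefetch step could look in HIP, assuming hipMemPrefetchAsync and hipCpuDeviceId are available on the target platform; it is not taken from the course solution.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void addInto(float *c, const float *a, const float *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (; i < n; i += stride) c[i] = a[i] + b[i];
}

int main() {
    const int n = 2 << 24;
    const size_t bytes = n * sizeof(float);
    int device = 0;
    hipGetDevice(&device);
    float *a, *b, *c;
    hipMallocManaged((void**)&a, bytes);      // CUDA: cudaMallocManaged
    hipMallocManaged((void**)&b, bytes);
    hipMallocManaged((void**)&c, bytes);
    for (int i = 0; i < n; ++i) { a[i] = 3.0f; b[i] = 4.0f; c[i] = 0.0f; }
    // The prefetch step the comment asks for: move pages to the GPU before the launch.
    hipMemPrefetchAsync(a, bytes, device, 0);
    hipMemPrefetchAsync(b, bytes, device, 0);
    hipMemPrefetchAsync(c, bytes, device, 0);
    hipLaunchKernelGGL(addInto, dim3(256), dim3(256), 0, 0, c, a, b, n);
    hipDeviceSynchronize();
    // And back to the host before the CPU reads the result.
    hipMemPrefetchAsync(c, bytes, hipCpuDeviceId, 0);
    hipDeviceSynchronize();
    printf("c[0] = %0.0f\n", c[0]);
    hipFree(a); hipFree(b); hipFree(c);
    return 0;
}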
a7bf1d28efcd3e44a967dddcbd9429fdc9266682.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Based on the Eric's C-code implementation of ldpc decode. // #include <cstdio> #include <math.h> #include <string.h> #include <stdio.h> #include <time.h> #include "GPUincludes.h" #include "LDPC.h" #define NTHREADS 64 #define CNP_THREADS 20 // checkNodeProcessing threads unsigned int nChecksByBits; unsigned int nBitsByChecks; bundleElt *eta; bundleElt *etaByBitIndex; bundleElt *lambdaByCheckIndex; bundleElt *cHat; bundleElt *parityBits; bundleElt paritySum; bundleElt *dev_rSig; bundleElt *dev_eta; bundleElt *dev_lambda; bundleElt *dev_etaByBitIndex; bundleElt *dev_lambdaByCheckIndex; bundleElt *dev_cHat; bundleElt *dev_parityBits; unsigned int *dev_mapRC; unsigned int *dev_mapCR; size_t temp_storage_bytes; int* temp_storage=NULL; void initLdpcDecoder (H_matrix *hmat, unsigned int nBundles) { unsigned int *mapRows2Cols = hmat->mapRows2Cols; unsigned int *mapCols2Rows = hmat->mapCols2Rows; unsigned int numBits = hmat->numBits; unsigned int numChecks = hmat->numChecks; unsigned int maxBitsPerCheck = hmat->maxBitsPerCheck; unsigned int maxChecksPerBit = hmat->maxChecksPerBit; bundleElt numContributorsBE; unsigned int bundleAddr; unsigned int nChecksByBits = numChecks*(maxBitsPerCheck+1); unsigned int nBitsByChecks = numBits*(maxChecksPerBit+1); HANDLE_ERROR( hipHostMalloc((void**) &eta, nChecksByBits* nBundles*sizeof(bundleElt))); HANDLE_ERROR( hipHostMalloc((void**) & etaByBitIndex, nBitsByChecks * nBundles*sizeof(bundleElt))); HANDLE_ERROR( hipHostMalloc((void**) & lambdaByCheckIndex, nChecksByBits* nBundles*sizeof(bundleElt))); HANDLE_ERROR( hipHostMalloc((void**) & cHat, nChecksByBits* nBundles*sizeof(bundleElt))); HANDLE_ERROR( hipHostMalloc((void**) & parityBits, numChecks * nBundles*sizeof(bundleElt))); HANDLE_ERROR( hipMalloc( (void**)&dev_rSig, numBits * nBundles*sizeof(bundleElt) )); HANDLE_ERROR( hipMalloc( (void**)&dev_eta, nChecksByBits * nBundles*sizeof(bundleElt))); HANDLE_ERROR( hipMalloc( (void**)&dev_lambda, numBits * nBundles*sizeof(bundleElt))); HANDLE_ERROR( hipMalloc( (void**)&dev_etaByBitIndex, nBitsByChecks * nBundles*sizeof(bundleElt))); HANDLE_ERROR( hipMalloc( (void**)&dev_lambdaByCheckIndex, nChecksByBits * nBundles*sizeof(bundleElt))); HANDLE_ERROR( hipMalloc( (void**)&dev_mapRC, nChecksByBits * sizeof(unsigned int))); HANDLE_ERROR( hipMalloc( (void**)&dev_mapCR, nBitsByChecks * sizeof(unsigned int))); HANDLE_ERROR( hipMalloc( (void**)&dev_cHat, nChecksByBits * nBundles*sizeof(bundleElt))); HANDLE_ERROR( hipMalloc( (void**)&dev_parityBits, numChecks * nBundles*sizeof(bundleElt))); HANDLE_ERROR(hipMemcpy(dev_mapRC, mapRows2Cols, nChecksByBits * sizeof(unsigned int), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(dev_mapCR, mapCols2Rows, nBitsByChecks * sizeof(unsigned int), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(dev_cHat, cHat, nChecksByBits * nBundles*sizeof(bundleElt), hipMemcpyHostToDevice)); bundleElt zeroBE = make_bundleElt(0.0); bundleElt twoBE = make_bundleElt(2.0); for (unsigned int i=0; i < nChecksByBits*nBundles; i++) { eta[i] = zeroBE; lambdaByCheckIndex[i] = zeroBE; } for (unsigned int i=0; i < nBitsByChecks*nBundles*nBundles; i++) etaByBitIndex[i] = zeroBE; // All matrices are stored in column order. // For eta and lambdaByCheckIndex, each column represents a Check node. // There are maxBitsPerCheck+1 rows. // row 0 always contains the number of contributors for this check node. 
for (unsigned int check=0; check<numChecks; check++) { numContributorsBE = make_bundleElt((float)mapRows2Cols[check]); for (unsigned int bundle=0; bundle < nBundles; bundle++) { bundleAddr = bundle * nChecksByBits + check; eta[bundleAddr] = numContributorsBE; lambdaByCheckIndex[bundleAddr] = numContributorsBE; cHat[bundleAddr] = numContributorsBE; } } // Need to have row 0 (see preceding code segment) in lambdaByCheckIndex and cHat into device memory, now. // For each new record, these device memory matrices are updated with a kernel HANDLE_ERROR(hipMemcpy(dev_lambdaByCheckIndex, lambdaByCheckIndex, nChecksByBits * nBundles*sizeof(bundleElt), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(dev_cHat, cHat, nChecksByBits * nBundles*sizeof(bundleElt), hipMemcpyHostToDevice)); // For etaByBitIndex, each column corresponds to a bit node. // row 0, contains the number of contributors for this bit. for (unsigned int bit=0; bit<numBits; bit++) { etaByBitIndex[bit] = make_bundleElt((float)mapCols2Rows[bit]); } } int ldpcDecoderWithInit (H_matrix *hmat, bundleElt *rSig, unsigned int maxIterations, unsigned int *decision, bundleElt *estimates, unsigned int nBundles) { unsigned int numBits = hmat->numBits; unsigned int numChecks = hmat->numChecks; unsigned int maxBitsPerCheck = hmat->maxBitsPerCheck; unsigned int maxChecksPerBit = hmat->maxChecksPerBit; unsigned int nChecksByBits = numChecks*(maxBitsPerCheck+1); unsigned int nBitsByChecks = numBits*(maxChecksPerBit+1); unsigned int *mapCols2Rows = hmat->mapCols2Rows; unsigned int iterCounter; bool allChecksPassed = false; unsigned int bundleBase; unsigned int successCount; unsigned int returnVal; for (int localBit = 0; localBit < numBits; localBit++) { unsigned int thisRowLength = mapCols2Rows[localBit]; for (int locali = 0; locali < thisRowLength; locali++) { unsigned int cellIndex = locali * numBits + localBit; unsigned int oneDindex = mapCols2Rows[cellIndex]; for (int localBundle = 0; localBundle < nBundles; localBundle++) { int bitOffset = numBits * localBundle; int lambdaOffset = nChecksByBits * localBundle; lambdaByCheckIndex[oneDindex + lambdaOffset] = rSig[localBit + bitOffset]; } } } HANDLE_ERROR(hipMemcpy(dev_rSig, rSig, numBits * nBundles*sizeof(bundleElt), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(dev_etaByBitIndex, etaByBitIndex, nBitsByChecks * nBundles*sizeof(bundleElt), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(dev_eta, eta, nChecksByBits * nBundles*sizeof(bundleElt), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(dev_lambdaByCheckIndex, lambdaByCheckIndex, nChecksByBits * nBundles*sizeof(bundleElt), hipMemcpyHostToDevice)); // copyBitsToCheckmatrix<<<numBits*nBundles, NTHREADS>>>(dev_mapCR, dev_rSig, dev_lambdaByCheckIndex, numBits, maxChecksPerBit, nChecksByBits, nBitsByChecks, nBundles); //////////////////////////////////////////////////////////////////////////// // Main iteration loop //////////////////////////////////////////////////////////////////////////// for(iterCounter=1;iterCounter<=maxIterations;iterCounter++) { hipLaunchKernelGGL(( checkNodeProcessingOptimalBlock) , dim3(numChecks*nBundles), dim3(CNP_THREADS), 0, 0, numChecks, maxBitsPerCheck, dev_lambdaByCheckIndex, dev_eta, dev_mapRC, dev_etaByBitIndex, nChecksByBits, nBitsByChecks, nBundles); hipLaunchKernelGGL(( bitEstimates), dim3((numBits*nBundles)/NTHREADS+1),dim3(NTHREADS), 0, 0, dev_rSig, dev_etaByBitIndex, dev_lambdaByCheckIndex, dev_cHat, dev_mapCR, numBits,maxChecksPerBit, nChecksByBits, nBitsByChecks, nBundles); hipLaunchKernelGGL(( 
calcParityBits) , dim3((numChecks*nBundles)/NTHREADS+1), dim3(NTHREADS), 0, 0, dev_cHat, dev_parityBits, numChecks, maxBitsPerCheck, nChecksByBits, nBundles); allChecksPassed = true; // The cpu is slightly faster than GPU DeviceReduce to determine if any paritycheck is non-zero. HANDLE_ERROR(hipMemcpy(parityBits, dev_parityBits, numChecks*nBundles*sizeof(bundleElt),hipMemcpyDeviceToHost)); paritySum = make_bundleElt(0.0); // We loop over parity checks for all bundles (in a single loop here) for (unsigned int check=0; check < numChecks*nBundles; check++) { for (unsigned int slot=0; slot< SLOTS_PER_ELT; slot++) if ((int)parityBits[check].s[slot] != 0) allChecksPassed = false; if (! allChecksPassed) break; } if (allChecksPassed) break; } // Return our best guess. // if iterCounter < maxIterations, then successful. successCount = 0; for (unsigned int bundle=0; bundle < nBundles; bundle++) { bundleBase = bundle* numChecks; paritySum = make_bundleElt(0.0); for (unsigned int check=0; check < numChecks; check++) paritySum += parityBits[bundleBase + check]; for (unsigned int slot=0; slot< SLOTS_PER_ELT; slot++) if ((int)paritySum.s[slot] == 0) successCount++; } returnVal = (successCount << 8) + iterCounter; return (returnVal); }
a7bf1d28efcd3e44a967dddcbd9429fdc9266682.cu
// Based on the Eric's C-code implementation of ldpc decode. // #include <cstdio> #include <math.h> #include <string.h> #include <stdio.h> #include <time.h> #include "GPUincludes.h" #include "LDPC.h" #define NTHREADS 64 #define CNP_THREADS 20 // checkNodeProcessing threads unsigned int nChecksByBits; unsigned int nBitsByChecks; bundleElt *eta; bundleElt *etaByBitIndex; bundleElt *lambdaByCheckIndex; bundleElt *cHat; bundleElt *parityBits; bundleElt paritySum; bundleElt *dev_rSig; bundleElt *dev_eta; bundleElt *dev_lambda; bundleElt *dev_etaByBitIndex; bundleElt *dev_lambdaByCheckIndex; bundleElt *dev_cHat; bundleElt *dev_parityBits; unsigned int *dev_mapRC; unsigned int *dev_mapCR; size_t temp_storage_bytes; int* temp_storage=NULL; void initLdpcDecoder (H_matrix *hmat, unsigned int nBundles) { unsigned int *mapRows2Cols = hmat->mapRows2Cols; unsigned int *mapCols2Rows = hmat->mapCols2Rows; unsigned int numBits = hmat->numBits; unsigned int numChecks = hmat->numChecks; unsigned int maxBitsPerCheck = hmat->maxBitsPerCheck; unsigned int maxChecksPerBit = hmat->maxChecksPerBit; bundleElt numContributorsBE; unsigned int bundleAddr; unsigned int nChecksByBits = numChecks*(maxBitsPerCheck+1); unsigned int nBitsByChecks = numBits*(maxChecksPerBit+1); HANDLE_ERROR( cudaMallocHost((void**) &eta, nChecksByBits* nBundles*sizeof(bundleElt))); HANDLE_ERROR( cudaMallocHost((void**) & etaByBitIndex, nBitsByChecks * nBundles*sizeof(bundleElt))); HANDLE_ERROR( cudaMallocHost((void**) & lambdaByCheckIndex, nChecksByBits* nBundles*sizeof(bundleElt))); HANDLE_ERROR( cudaMallocHost((void**) & cHat, nChecksByBits* nBundles*sizeof(bundleElt))); HANDLE_ERROR( cudaMallocHost((void**) & parityBits, numChecks * nBundles*sizeof(bundleElt))); HANDLE_ERROR( cudaMalloc( (void**)&dev_rSig, numBits * nBundles*sizeof(bundleElt) )); HANDLE_ERROR( cudaMalloc( (void**)&dev_eta, nChecksByBits * nBundles*sizeof(bundleElt))); HANDLE_ERROR( cudaMalloc( (void**)&dev_lambda, numBits * nBundles*sizeof(bundleElt))); HANDLE_ERROR( cudaMalloc( (void**)&dev_etaByBitIndex, nBitsByChecks * nBundles*sizeof(bundleElt))); HANDLE_ERROR( cudaMalloc( (void**)&dev_lambdaByCheckIndex, nChecksByBits * nBundles*sizeof(bundleElt))); HANDLE_ERROR( cudaMalloc( (void**)&dev_mapRC, nChecksByBits * sizeof(unsigned int))); HANDLE_ERROR( cudaMalloc( (void**)&dev_mapCR, nBitsByChecks * sizeof(unsigned int))); HANDLE_ERROR( cudaMalloc( (void**)&dev_cHat, nChecksByBits * nBundles*sizeof(bundleElt))); HANDLE_ERROR( cudaMalloc( (void**)&dev_parityBits, numChecks * nBundles*sizeof(bundleElt))); HANDLE_ERROR(cudaMemcpy(dev_mapRC, mapRows2Cols, nChecksByBits * sizeof(unsigned int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_mapCR, mapCols2Rows, nBitsByChecks * sizeof(unsigned int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_cHat, cHat, nChecksByBits * nBundles*sizeof(bundleElt), cudaMemcpyHostToDevice)); bundleElt zeroBE = make_bundleElt(0.0); bundleElt twoBE = make_bundleElt(2.0); for (unsigned int i=0; i < nChecksByBits*nBundles; i++) { eta[i] = zeroBE; lambdaByCheckIndex[i] = zeroBE; } for (unsigned int i=0; i < nBitsByChecks*nBundles*nBundles; i++) etaByBitIndex[i] = zeroBE; // All matrices are stored in column order. // For eta and lambdaByCheckIndex, each column represents a Check node. // There are maxBitsPerCheck+1 rows. // row 0 always contains the number of contributors for this check node. 
for (unsigned int check=0; check<numChecks; check++) { numContributorsBE = make_bundleElt((float)mapRows2Cols[check]); for (unsigned int bundle=0; bundle < nBundles; bundle++) { bundleAddr = bundle * nChecksByBits + check; eta[bundleAddr] = numContributorsBE; lambdaByCheckIndex[bundleAddr] = numContributorsBE; cHat[bundleAddr] = numContributorsBE; } } // Need to have row 0 (see preceding code segment) in lambdaByCheckIndex and cHat into device memory, now. // For each new record, these device memory matrices are updated with a kernel HANDLE_ERROR(cudaMemcpy(dev_lambdaByCheckIndex, lambdaByCheckIndex, nChecksByBits * nBundles*sizeof(bundleElt), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_cHat, cHat, nChecksByBits * nBundles*sizeof(bundleElt), cudaMemcpyHostToDevice)); // For etaByBitIndex, each column corresponds to a bit node. // row 0, contains the number of contributors for this bit. for (unsigned int bit=0; bit<numBits; bit++) { etaByBitIndex[bit] = make_bundleElt((float)mapCols2Rows[bit]); } } int ldpcDecoderWithInit (H_matrix *hmat, bundleElt *rSig, unsigned int maxIterations, unsigned int *decision, bundleElt *estimates, unsigned int nBundles) { unsigned int numBits = hmat->numBits; unsigned int numChecks = hmat->numChecks; unsigned int maxBitsPerCheck = hmat->maxBitsPerCheck; unsigned int maxChecksPerBit = hmat->maxChecksPerBit; unsigned int nChecksByBits = numChecks*(maxBitsPerCheck+1); unsigned int nBitsByChecks = numBits*(maxChecksPerBit+1); unsigned int *mapCols2Rows = hmat->mapCols2Rows; unsigned int iterCounter; bool allChecksPassed = false; unsigned int bundleBase; unsigned int successCount; unsigned int returnVal; for (int localBit = 0; localBit < numBits; localBit++) { unsigned int thisRowLength = mapCols2Rows[localBit]; for (int locali = 0; locali < thisRowLength; locali++) { unsigned int cellIndex = locali * numBits + localBit; unsigned int oneDindex = mapCols2Rows[cellIndex]; for (int localBundle = 0; localBundle < nBundles; localBundle++) { int bitOffset = numBits * localBundle; int lambdaOffset = nChecksByBits * localBundle; lambdaByCheckIndex[oneDindex + lambdaOffset] = rSig[localBit + bitOffset]; } } } HANDLE_ERROR(cudaMemcpy(dev_rSig, rSig, numBits * nBundles*sizeof(bundleElt), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_etaByBitIndex, etaByBitIndex, nBitsByChecks * nBundles*sizeof(bundleElt), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_eta, eta, nChecksByBits * nBundles*sizeof(bundleElt), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_lambdaByCheckIndex, lambdaByCheckIndex, nChecksByBits * nBundles*sizeof(bundleElt), cudaMemcpyHostToDevice)); // copyBitsToCheckmatrix<<<numBits*nBundles, NTHREADS>>>(dev_mapCR, dev_rSig, dev_lambdaByCheckIndex, numBits, maxChecksPerBit, nChecksByBits, nBitsByChecks, nBundles); //////////////////////////////////////////////////////////////////////////// // Main iteration loop //////////////////////////////////////////////////////////////////////////// for(iterCounter=1;iterCounter<=maxIterations;iterCounter++) { checkNodeProcessingOptimalBlock <<<numChecks*nBundles, CNP_THREADS>>> (numChecks, maxBitsPerCheck, dev_lambdaByCheckIndex, dev_eta, dev_mapRC, dev_etaByBitIndex, nChecksByBits, nBitsByChecks, nBundles); bitEstimates<<<(numBits*nBundles)/NTHREADS+1,NTHREADS>>> (dev_rSig, dev_etaByBitIndex, dev_lambdaByCheckIndex, dev_cHat, dev_mapCR, numBits,maxChecksPerBit, nChecksByBits, nBitsByChecks, nBundles); calcParityBits <<<(numChecks*nBundles)/NTHREADS+1, NTHREADS>>> (dev_cHat, dev_parityBits, 
numChecks, maxBitsPerCheck, nChecksByBits, nBundles); allChecksPassed = true; // The cpu is slightly faster than GPU DeviceReduce to determine if any paritycheck is non-zero. HANDLE_ERROR(cudaMemcpy(parityBits, dev_parityBits, numChecks*nBundles*sizeof(bundleElt),cudaMemcpyDeviceToHost)); paritySum = make_bundleElt(0.0); // We loop over parity checks for all bundles (in a single loop here) for (unsigned int check=0; check < numChecks*nBundles; check++) { for (unsigned int slot=0; slot< SLOTS_PER_ELT; slot++) if ((int)parityBits[check].s[slot] != 0) allChecksPassed = false; if (! allChecksPassed) break; } if (allChecksPassed) break; } // Return our best guess. // if iterCounter < maxIterations, then successful. successCount = 0; for (unsigned int bundle=0; bundle < nBundles; bundle++) { bundleBase = bundle* numChecks; paritySum = make_bundleElt(0.0); for (unsigned int check=0; check < numChecks; check++) paritySum += parityBits[bundleBase + check]; for (unsigned int slot=0; slot< SLOTS_PER_ELT; slot++) if ((int)paritySum.s[slot] == 0) successCount++; } returnVal = (successCount << 8) + iterCounter; return (returnVal); }
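// ldpcDecoderWithInit above packs its result as (successCount << 8) + iterCounter. A small sketch of
// how a caller might unpack it, assuming maxIterations fits in the low 8 bits; the helper name is
// illustrative, not part of the decoder.
static inline void unpackDecoderResult(unsigned int returnVal,
                                       unsigned int *successCount,
                                       unsigned int *iterationsUsed) {
    *successCount   = returnVal >> 8;    // bundle slots whose parity checks all summed to zero
    *iterationsUsed = returnVal & 0xffu; // iteration count; below maxIterations means early success
}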
d36e7dd99d7875e9519667994fd34bbe35cb0246.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "f_eval.cuh"
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <thrust/host_vector.h>

// Constant functor; the call operator must be spelled operator()(argument).
struct func4 {
    func4() {}
    __host__ __device__ double operator()(double& input) { return -1.7; }
};

int main(int argc, char* argv[]) {
    if(argc == 4){
        FILE* fpIn = fopen(argv[1], "r");
        FILE* fpOut = fopen(argv[2], "w");
        double epsilon = atof(argv[3]);
        const double h = 1e-2;
        // n different input points, m variables ( x1,x2,.....xm) at each point
        int n, m;
        fscanf(fpIn, "%d", &n);
        fscanf(fpIn, "%d", &m);
        double *input = (double*) malloc(m * n * sizeof(double));
        double *output = (double*) malloc(m * n * sizeof(double));
        thrust::host_vector<double> hX(n*m);
        // Read all the input points from the file
        for (int i = 0; i < n; i++) {
            for(int j = 0; j < m; j++) {
                fscanf(fpIn, "%lf,", &input[i*m+j]);
                hX[i*m+j] = input[i*m+j];
            }
        }
        // Start the timer
        // double start = omp_get_wtime();
        hipEvent_t start, stop;
        hipEventCreate(&start);
        hipEventCreate(&stop);
        hipEventRecord(start, NULL);
        thrust::device_vector<double> dX = hX;
        thrust::device_vector<double> dout(n*m);
        // thrust::transform(dX.begin(),dX.end(),dout.begin(), func(m,n,h,epsilon));
        // for(int i = 0; i < n; i++){
        //     int start_addr = i*m;
        //     thrust::transform(&dX[start_addr],&dX[start_addr+m],&dout[start_addr], func3(m,&dX[start_addr],h,epsilon));
        // }
        thrust::transform(dX.begin(),dX.end(),dout.begin(),func4());
        thrust::copy(dout.begin(), dout.end(), &output[0]);
        // Stop the timer
        // double end = omp_get_wtime();
        hipEventRecord(stop,NULL);
        hipEventSynchronize(stop);
        float msecTotal = 0.0f;
        hipEventElapsedTime(&msecTotal, start, stop);
        // Write the result into the file
        for(int i = 0; i < n; i++) {
            for(int j = 0; j < m; j++) {
                if(j != m-1) fprintf(fpOut, "%.4lf,", output[i*m + j] );
                else fprintf(fpOut, "%.4lf\n", output[i*m + j] );
            }
        }
        fclose(fpIn);
        fclose(fpOut);
        double time = msecTotal; // time in ms
        // Create a new file to log the execution time
        FILE* fpLog = fopen("sequentialLog", "a");
        fprintf(fpLog, "%d\t%d\t%lf\n", n, m, time);
        fclose(fpLog);
        free(input);
        free(output);
    }
    else{
        printf("Insufficient arguments");
    }
    return 0;
}
d36e7dd99d7875e9519667994fd34bbe35cb0246.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "f_eval.cuh"
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <thrust/host_vector.h>

// Constant functor; the call operator must be spelled operator()(argument).
struct func4 {
    func4() {}
    __host__ __device__ double operator()(double& input) { return -1.7; }
};

int main(int argc, char* argv[]) {
    if(argc == 4){
        FILE* fpIn = fopen(argv[1], "r");
        FILE* fpOut = fopen(argv[2], "w");
        double epsilon = atof(argv[3]);
        const double h = 1e-2;
        // n different input points, m variables ( x1,x2,.....xm) at each point
        int n, m;
        fscanf(fpIn, "%d", &n);
        fscanf(fpIn, "%d", &m);
        double *input = (double*) malloc(m * n * sizeof(double));
        double *output = (double*) malloc(m * n * sizeof(double));
        thrust::host_vector<double> hX(n*m);
        // Read all the input points from the file
        for (int i = 0; i < n; i++) {
            for(int j = 0; j < m; j++) {
                fscanf(fpIn, "%lf,", &input[i*m+j]);
                hX[i*m+j] = input[i*m+j];
            }
        }
        // Start the timer
        // double start = omp_get_wtime();
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, NULL);
        thrust::device_vector<double> dX = hX;
        thrust::device_vector<double> dout(n*m);
        // thrust::transform(dX.begin(),dX.end(),dout.begin(), func(m,n,h,epsilon));
        // for(int i = 0; i < n; i++){
        //     int start_addr = i*m;
        //     thrust::transform(&dX[start_addr],&dX[start_addr+m],&dout[start_addr], func3(m,&dX[start_addr],h,epsilon));
        // }
        thrust::transform(dX.begin(),dX.end(),dout.begin(),func4());
        thrust::copy(dout.begin(), dout.end(), &output[0]);
        // Stop the timer
        // double end = omp_get_wtime();
        cudaEventRecord(stop,NULL);
        cudaEventSynchronize(stop);
        float msecTotal = 0.0f;
        cudaEventElapsedTime(&msecTotal, start, stop);
        // Write the result into the file
        for(int i = 0; i < n; i++) {
            for(int j = 0; j < m; j++) {
                if(j != m-1) fprintf(fpOut, "%.4lf,", output[i*m + j] );
                else fprintf(fpOut, "%.4lf\n", output[i*m + j] );
            }
        }
        fclose(fpIn);
        fclose(fpOut);
        double time = msecTotal; // time in ms
        // Create a new file to log the execution time
        FILE* fpLog = fopen("sequentialLog", "a");
        fprintf(fpLog, "%d\t%d\t%lf\n", n, m, time);
        fclose(fpLog);
        free(input);
        free(output);
    }
    else{
        printf("Insufficient arguments");
    }
    return 0;
}
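// The constant functor above follows the standard thrust pattern: a struct whose operator() is
// marked __host__ __device__. A sketch of the same pattern with state captured in the constructor;
// the functor name and its per-element expression are illustrative, not the func3 referenced in the
// commented-out code.
struct scale_shift {
    double h, epsilon;
    scale_shift(double h_, double eps_) : h(h_), epsilon(eps_) {}
    __host__ __device__ double operator()(const double& x) const {
        return (x + h) * epsilon;   // any per-element expression of the captured state
    }
};
// Usage: thrust::transform(dX.begin(), dX.end(), dout.begin(), scale_shift(1e-2, 0.5));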
afd24bbd8b6a262938683e4a5e1d174268d339c0.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************** * GPU * * @author ZhRiT * * @email [email protected] * * @version 1.0.1 * * @date 2018-10-31 * ***************************************************************/ #include "pch.h" #include "PSO.cuh" #include <hip/hip_runtime.h> #include <time.h> #include "psocudalib.cuh" #include <iostream> #include <device_launch_parameters.h> using namespace std; /** * @brief CPU */ void PSO::Run_GPU() { // cuda if (!psocudalib::InitCUDA()) { cout << "CUDA initializa failed!" << endl; return; } // unsigned int memoriaSize1 = (m_d * m_number) * sizeof(double); unsigned int memoriaSize2 = (m_number) * sizeof(double); unsigned int memoriaSize3 = (m_d) * sizeof(double); double *xx; // double *vx; // double *value; // double *pbestx; // double *pbest; // double *min; // double *max; // int *gbest; // ? // hipMalloc((void**)&xx, memoriaSize1); hipMalloc((void**)&vx, memoriaSize1); hipMalloc((void**)&pbestx, memoriaSize1); hipMalloc((void**)&pbest, memoriaSize2); hipMalloc((void**)&max, memoriaSize3); hipMalloc((void**)&min, memoriaSize3); hipMalloc((void**)&value, memoriaSize2);//? hipMalloc((void**)&gbest, sizeof(int));//? // CPU->GPU hipMemcpy(min, m_min, memoriaSize3, hipMemcpyHostToDevice); hipMemcpy(max, m_max, memoriaSize3, hipMemcpyHostToDevice); dim3 threads(16, 16); dim3 blocks(2, 2); dim3 threadsN(6, 6); dim3 blocksN(1, 1); // int threadNum = 1; while (threadNum < m_number) { threadNum <<= 1; } // psokernel::InitParticles << <blocks, threads >> > (xx, vx, pbestx, gbest, m_d, m_number, min, max); int iter = 0; do { psokernel::GetFitness << <blocksN, threadsN >> > (xx, value, m_d, m_number, 1); // psokernel::UpdatePbest << <blocks, threads >> > (xx, value, pbestx, pbest, m_d, m_number, iter); // psokernel::UpdateGbest << <blocksN, threadsN, 2 * m_number * sizeof(double) >> > (gbest, pbest, m_d, m_number, threadNum);// psokernel::UpdateParticles << <blocks, threads >> > (xx, vx, pbestx, gbest, m_d, m_number, min, max);// iter++; } while (iter < m_tMax); //m_tMax m_t_act = iter; m_status = OK; // //double *rxx = new double[m_d * m_number]; //double *rvx = new double[m_d * m_number]; //double *rv = new double[m_number]; double *rpx= new double[m_d * m_number]; double *rp = new double[m_number]; int *rg = new int; //hipMemcpy(rxx, xx, memoriaSize1, hipMemcpyDeviceToHost); //hipMemcpy(rvx, vx, memoriaSize1, hipMemcpyDeviceToHost); //hipMemcpy(rv, value, memoriaSize2, hipMemcpyDeviceToHost); hipMemcpy(rpx, pbestx, memoriaSize1, hipMemcpyDeviceToHost); hipMemcpy(rp, pbest, memoriaSize2, hipMemcpyDeviceToHost); hipMemcpy(rg, gbest, sizeof(int), hipMemcpyDeviceToHost); for (int i = 0; i < m_d; i++) { m_result_pos[i] = rpx[m_d * *rg + i]; } m_result_value = rp[*rg]; hipFree(xx); hipFree(vx); hipFree(pbestx); hipFree(pbest); hipFree(value); hipFree(gbest); hipFree(min); hipFree(max); hipDeviceReset(); return; } namespace psokernel { /** * @breif * @param xx * @param vx * @param pbestx * @param gbest * @param d * @param n * @param min * @param max */ __global__ void InitParticles(double *xx, double *vx, double *pbestx, int *gbest, int d, int n, double *min, double *max) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; // int index = bx * gridDim.x * blockDim.x * blockDim.y + by * blockDim.x * blockDim.y + ty * blockDim.x + tx; if (index < d * n) { int dind = index % d; xx[index] = (max[dind] - min[dind]) * psocudalib::randomNumber(index) + min[dind]; 
pbestx[index] = xx[index]; vx[index] = (max[dind] - min[dind]) * psocudalib::randomNumber(index) + min[dind] - xx[index]; if (index == 0) *gbest = 0; } } /** * @breif * @param xx * @param value * @param d * @param n * @param funcIdx */ __global__ void GetFitness(double *xx, double *value, int d, int n, int funcIdx) { int tx = threadIdx.x; int ty = threadIdx.y; int index = ty * blockDim.x + tx; // int offset = index * d; // xx if (index < n) { double val = 0.0; val = psocudalib::sphere(xx, d, offset); value[index] = val; } } /** * @breif * @param xx * @param value * @param pbestx * @param pbest * @param d * @param n * @param iter */ __global__ void UpdatePbest(double *xx, double *value, double *pbestx, double *pbest,int d, int n, int iter) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; // int index = bx * gridDim.x * blockDim.x * blockDim.y + by * blockDim.x * blockDim.y + ty * blockDim.x + tx; int n_curr = index / d; if (index < d * n) { if (iter == 0) { pbestx[index] = xx[index]; pbest[n_curr] = value[n_curr]; } else { if (value[n_curr] < pbest[n_curr]) { pbestx[index] = xx[index]; pbest[n_curr] = value[n_curr]; } } } } /** * @breif * @param gbest * @param pbest * @param d * @param n * @param threadNum 2 */ __global__ void UpdateGbest(int *gbest, double *pbest, int d, int n, int threadNum) { extern __shared__ double s[]; double *pbestshared = s; int *indiceshared = (int*)&pbestshared[n]; int tx = threadIdx.x; int ty = threadIdx.y; int index = tx + ty * blockDim.x; //printf("%d, %d\n", indiceshared, pbestshared); if (index < n) { // indiceshared[index] = index; pbestshared[index] = pbest[index]; //printf("%d, %f, %f\n", index, pbestshared[index], pbest[index]); __syncthreads(); // for (int s = 1; s <= threadNum; s *= 2) { if (index % (2 * s) == 0) { if (index + s < n) { if (pbestshared[indiceshared[index]] > pbestshared[indiceshared[index + s]]) { indiceshared[index] = indiceshared[index + s]; } } } __syncthreads(); } } // if (index == 0) { *gbest = indiceshared[0]; } } /** * @breif * @param xx * @param vx * @param pbestx * @param gbest * @param d * @param n * @param min * @param max */ __global__ void UpdateParticles(double *xx, double *vx, double *pbestx, int *gbest, int d, int n, double *min, double *max) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; // int index = bx * gridDim.x * blockDim.x * blockDim.y + by * blockDim.x * blockDim.y + ty * blockDim.x + tx; if (index < d * n) { int d_curr = index % d; double chi = 0.7298437881283576; double c = 2.05; double r1 = psocudalib::randomNumber(index); double r2 = psocudalib::randomNumber(index); vx[index] = chi * (vx[index] + c * r1 * (pbestx[index] - xx[index]) + c * r2 * (pbestx[d_curr + *gbest * d] - xx[index])); // xx[index] = xx[index] + vx[index]; // xx[index] = (max[dind] - min[dind]) * psocudalib::randomNumber(index) + min[dind]; if (xx[index] > max[d_curr]) { xx[index] = max[d_curr]; vx[index] = -vx[index] * 0.5; } else if (xx[index] < min[d_curr]) { xx[index] = min[d_curr]; vx[index] = -vx[index] * 0.5; } } } }
afd24bbd8b6a262938683e4a5e1d174268d339c0.cu
/***************************************************************
 * This file contains the definitions of the GPU-related       *
 * functions of the particle swarm optimization (PSO) class    *
 * @author ZhRiT                                               *
 * @email [email protected]                                   *
 * @version 1.0.1                                              *
 * @date 2018-10-31                                            *
 ***************************************************************/
#include "pch.h"
#include "PSO.cuh"
#include <cuda_runtime.h>
#include <time.h>
#include "psocudalib.cuh"
#include <iostream>
#include <device_launch_parameters.h>
using namespace std;

/**
 * @brief Run the optimization on the GPU
 */
void PSO::Run_GPU() {
    // Initialize CUDA
    if (!psocudalib::InitCUDA()) {
        cout << "CUDA initializa failed!" << endl;
        return;
    }
    // Memory sizes
    unsigned int memoriaSize1 = (m_d * m_number) * sizeof(double);
    unsigned int memoriaSize2 = (m_number) * sizeof(double);
    unsigned int memoriaSize3 = (m_d) * sizeof(double);

    double *xx;      // positions
    double *vx;      // velocities
    double *value;   // objective function values
    double *pbestx;  // personal best positions
    double *pbest;   // personal best values
    double *min;     // lower bounds of the positions
    double *max;     // upper bounds of the positions
    int *gbest;      // index (marker) of the global best particle?

    // Allocate device memory
    cudaMalloc((void**)&xx, memoriaSize1);
    cudaMalloc((void**)&vx, memoriaSize1);
    cudaMalloc((void**)&pbestx, memoriaSize1);
    cudaMalloc((void**)&pbest, memoriaSize2);
    cudaMalloc((void**)&max, memoriaSize3);
    cudaMalloc((void**)&min, memoriaSize3);
    cudaMalloc((void**)&value, memoriaSize2);//?
    cudaMalloc((void**)&gbest, sizeof(int));//?

    // Copy data CPU->GPU
    cudaMemcpy(min, m_min, memoriaSize3, cudaMemcpyHostToDevice);
    cudaMemcpy(max, m_max, memoriaSize3, cudaMemcpyHostToDevice);

    dim3 threads(16, 16);
    dim3 blocks(2, 2);
    dim3 threadsN(6, 6);
    dim3 blocksN(1, 1);

    // Size of the comparison tree: the smallest power of two not less than the number of particles
    int threadNum = 1;
    while (threadNum < m_number) { threadNum <<= 1; }

    // Initialize the particles
    psokernel::InitParticles << <blocks, threads >> > (xx, vx, pbestx, gbest, m_d, m_number, min, max);

    int iter = 0;
    do {
        psokernel::GetFitness << <blocksN, threadsN >> > (xx, value, m_d, m_number, 1); // evaluate the objective function
        psokernel::UpdatePbest << <blocks, threads >> > (xx, value, pbestx, pbest, m_d, m_number, iter); // update the personal bests
        psokernel::UpdateGbest << <blocksN, threadsN, 2 * m_number * sizeof(double) >> > (gbest, pbest, m_d, m_number, threadNum); // update the global best
        psokernel::UpdateParticles << <blocks, threads >> > (xx, vx, pbestx, gbest, m_d, m_number, min, max); // update the particle positions
        iter++;
    } while (iter < m_tMax); //m_tMax

    m_t_act = iter;
    m_status = OK;

    // Fetch the results
    //double *rxx = new double[m_d * m_number];
    //double *rvx = new double[m_d * m_number];
    //double *rv = new double[m_number];
    double *rpx= new double[m_d * m_number];
    double *rp = new double[m_number];
    int *rg = new int;
    //cudaMemcpy(rxx, xx, memoriaSize1, cudaMemcpyDeviceToHost);
    //cudaMemcpy(rvx, vx, memoriaSize1, cudaMemcpyDeviceToHost);
    //cudaMemcpy(rv, value, memoriaSize2, cudaMemcpyDeviceToHost);
    cudaMemcpy(rpx, pbestx, memoriaSize1, cudaMemcpyDeviceToHost);
    cudaMemcpy(rp, pbest, memoriaSize2, cudaMemcpyDeviceToHost);
    cudaMemcpy(rg, gbest, sizeof(int), cudaMemcpyDeviceToHost);

    for (int i = 0; i < m_d; i++) {
        m_result_pos[i] = rpx[m_d * *rg + i];
    }
    m_result_value = rp[*rg];

    cudaFree(xx);
    cudaFree(vx);
    cudaFree(pbestx);
    cudaFree(pbest);
    cudaFree(value);
    cudaFree(gbest);
    cudaFree(min);
    cudaFree(max);
    cudaThreadExit();
    return;
}

namespace psokernel {
    /**
     * @brief Randomly initialize the particles
     * @param xx     positions
     * @param vx     velocities
     * @param pbestx personal best positions
     * @param gbest  index of the global best particle
     * @param d      number of dimensions
     * @param n      number of particles
     * @param min    lower bounds of the positions
     * @param max    upper bounds of the positions
     */
    __global__ void InitParticles(double *xx, double *vx, double *pbestx, int *gbest, int d, int n, double *min, double *max) {
        int bx = blockIdx.x; int by = blockIdx.y;
        int tx = threadIdx.x; int ty = threadIdx.y;
        // Compute the global index
        int index = bx * gridDim.x * blockDim.x * blockDim.y + by * blockDim.x * blockDim.y + ty * blockDim.x + tx;
        if (index < d * n) {
            int dind = index % d;
            xx[index] = (max[dind] - min[dind]) * psocudalib::randomNumber(index) + min[dind];
            pbestx[index] = xx[index];
            vx[index] = (max[dind] - min[dind]) * psocudalib::randomNumber(index) + min[dind] - xx[index];
            if (index == 0) *gbest = 0;
        }
    }

    /**
     * @brief Evaluate the objective function
     * @param xx      positions
     * @param value   objective function values
     * @param d       number of dimensions
     * @param n       number of particles
     * @param funcIdx index of the objective function
     */
    __global__ void GetFitness(double *xx, double *value, int d, int n, int funcIdx) {
        int tx = threadIdx.x; int ty = threadIdx.y;
        int index = ty * blockDim.x + tx; // particle index
        int offset = index * d;           // offset of this particle in the xx array
        if (index < n) {
            double val = 0.0;
            val = psocudalib::sphere(xx, d, offset);
            value[index] = val;
        }
    }

    /**
     * @brief Update the personal bests
     * @param xx     positions
     * @param value  objective function values
     * @param pbestx personal best positions
     * @param pbest  personal best values
     * @param d      number of dimensions
     * @param n      number of particles
     * @param iter   current iteration
     */
    __global__ void UpdatePbest(double *xx, double *value, double *pbestx, double *pbest,int d, int n, int iter) {
        int bx = blockIdx.x; int by = blockIdx.y;
        int tx = threadIdx.x; int ty = threadIdx.y;
        // Compute the global index (shared-memory optimization still pending)
        int index = bx * gridDim.x * blockDim.x * blockDim.y + by * blockDim.x * blockDim.y + ty * blockDim.x + tx;
        int n_curr = index / d;
        if (index < d * n) {
            if (iter == 0) {
                pbestx[index] = xx[index];
                pbest[n_curr] = value[n_curr];
            }
            else {
                if (value[n_curr] < pbest[n_curr]) {
                    pbestx[index] = xx[index];
                    pbest[n_curr] = value[n_curr];
                }
            }
        }
    }

    /**
     * @brief Update the global best
     * @param gbest     index of the global best particle
     * @param pbest     personal best values
     * @param d         number of dimensions
     * @param n         number of particles
     * @param threadNum number of threads: the smallest power of two not less than the number of particles
     */
    __global__ void UpdateGbest(int *gbest, double *pbest, int d, int n, int threadNum) {
        extern __shared__ double s[];
        double *pbestshared = s;
        int *indiceshared = (int*)&pbestshared[n];
        int tx = threadIdx.x; int ty = threadIdx.y;
        int index = tx + ty * blockDim.x;
        //printf("%d, %d\n", indiceshared, pbestshared);
        if (index < n) {
            // Copy the data into shared memory
            indiceshared[index] = index;
            pbestshared[index] = pbest[index];
            //printf("%d, %f, %f\n", index, pbestshared[index], pbest[index]);
            __syncthreads();
            // Comparison (reduction) tree
            for (int s = 1; s <= threadNum; s *= 2) {
                if (index % (2 * s) == 0) {
                    if (index + s < n) {
                        if (pbestshared[indiceshared[index]] > pbestshared[indiceshared[index + s]]) {
                            indiceshared[index] = indiceshared[index + s];
                        }
                    }
                }
                __syncthreads();
            }
        }
        // Read out the index of the global best
        if (index == 0) {
            *gbest = indiceshared[0];
        }
    }

    /**
     * @brief Update the particle positions
     * @param xx     positions
     * @param vx     velocities
     * @param pbestx personal best positions
     * @param gbest  index of the global best particle
     * @param d      number of dimensions
     * @param n      number of particles
     * @param min    lower bounds of the positions
     * @param max    upper bounds of the positions
     */
    __global__ void UpdateParticles(double *xx, double *vx, double *pbestx, int *gbest, int d, int n, double *min, double *max) {
        int bx = blockIdx.x; int by = blockIdx.y;
        int tx = threadIdx.x; int ty = threadIdx.y;
        // Compute the global index (shared-memory optimization still pending)
        int index = bx * gridDim.x * blockDim.x * blockDim.y + by * blockDim.x * blockDim.y + ty * blockDim.x + tx;
        if (index < d * n) {
            int d_curr = index % d;
            double chi = 0.7298437881283576;
            double c = 2.05;
            double r1 = psocudalib::randomNumber(index);
            double r2 = psocudalib::randomNumber(index);
            vx[index] = chi * (vx[index] + c * r1 * (pbestx[index] - xx[index]) + c * r2 * (pbestx[d_curr + *gbest * d] - xx[index]));
            // The velocity magnitude is currently not limited
            xx[index] = xx[index] + vx[index];
            // Range: xx[index] = (max[dind] - min[dind]) * psocudalib::randomNumber(index) + min[dind];
            if (xx[index] > max[d_curr]) {
                xx[index] = max[d_curr];
                vx[index] = -vx[index] * 0.5;
            }
            else if (xx[index] < min[d_curr]) {
                xx[index] = min[d_curr];
                vx[index] = -vx[index] * 0.5;
            }
        }
    }
}
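// UpdateGbest above carves its dynamic shared memory into a double[n] block followed by an int[n]
// block, and the host side requests 2 * m_number * sizeof(double) bytes for it. A small sketch of
// that sizing logic; the helper name is illustrative and n is the particle count.
static inline size_t gbestSharedBytes(int n) {
    // double values for the personal bests plus int indices; rounding the int block up to a full
    // double per element (as the launch above does) over-allocates slightly but is always enough.
    size_t exact   = n * sizeof(double) + n * sizeof(int);
    size_t rounded = 2 * (size_t)n * sizeof(double);
    return rounded >= exact ? rounded : exact;
}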
e93314428678cdc2262614e01db9828ca19caf64.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "saber/funcs/impl/cuda/saber_soft_sign.h" #include "hip/hip_fp16.h" namespace anakin{ namespace saber{ template<typename Dtype> __global__ void ker_soft_sign_fwd(Dtype * out_data, const Dtype* in_data, const int count) { CUDA_KERNEL_LOOP(tid, count) { Dtype in_var = in_data[tid]; Dtype in_abs = in_var > 0 ? in_var : -in_var; out_data[tid] = in_var / (in_abs + (Dtype)1.f); } } template <DataType OpDtype> SaberStatus SaberSoftSign<NV, OpDtype>::dispatch( \ const std::vector<Tensor<NV>*>& inputs, std::vector<Tensor<NV>*>& outputs, SoftSignParam<NV>& param) { const OpDataType *in_data = (const OpDataType*)inputs[0]->data(); OpDataType *out_data = (OpDataType*)outputs[0]->mutable_data(); const int count = inputs[0]->valid_size(); hipStream_t cuda_stream = this->_ctx->get_compute_stream(); //y = x / (x + 1) hipLaunchKernelGGL(( ker_soft_sign_fwd<OpDataType>) , dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, out_data, in_data, count); CUDA_POST_KERNEL_CHECK; return SaberSuccess; } template class SaberSoftSign<NV, AK_FLOAT>; DEFINE_OP_TEMPLATE(SaberSoftSign, SoftSignParam, NV, AK_INT8); DEFINE_OP_TEMPLATE(SaberSoftSign, SoftSignParam, NV, AK_HALF); } }
e93314428678cdc2262614e01db9828ca19caf64.cu
#include "saber/funcs/impl/cuda/saber_soft_sign.h" #include "cuda_fp16.h" namespace anakin{ namespace saber{ template<typename Dtype> __global__ void ker_soft_sign_fwd(Dtype * out_data, const Dtype* in_data, const int count) { CUDA_KERNEL_LOOP(tid, count) { Dtype in_var = in_data[tid]; Dtype in_abs = in_var > 0 ? in_var : -in_var; out_data[tid] = in_var / (in_abs + (Dtype)1.f); } } template <DataType OpDtype> SaberStatus SaberSoftSign<NV, OpDtype>::dispatch( \ const std::vector<Tensor<NV>*>& inputs, std::vector<Tensor<NV>*>& outputs, SoftSignParam<NV>& param) { const OpDataType *in_data = (const OpDataType*)inputs[0]->data(); OpDataType *out_data = (OpDataType*)outputs[0]->mutable_data(); const int count = inputs[0]->valid_size(); cudaStream_t cuda_stream = this->_ctx->get_compute_stream(); //y = x / (x + 1) ker_soft_sign_fwd<OpDataType> <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>( out_data, in_data, count); CUDA_POST_KERNEL_CHECK; return SaberSuccess; } template class SaberSoftSign<NV, AK_FLOAT>; DEFINE_OP_TEMPLATE(SaberSoftSign, SoftSignParam, NV, AK_INT8); DEFINE_OP_TEMPLATE(SaberSoftSign, SoftSignParam, NV, AK_HALF); } }
42dc958b875d92601202ef16afb597710ec73b2f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "cu_elementWiseMultiply.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; hipMalloc(&A, XSIZE*YSIZE); float B = 2; const int n = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( cu_elementWiseMultiply), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( cu_elementWiseMultiply), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( cu_elementWiseMultiply), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
42dc958b875d92601202ef16afb597710ec73b2f.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "cu_elementWiseMultiply.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); float B = 2; const int n = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); cu_elementWiseMultiply<<<gridBlock,threadBlock>>>(A,B,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { cu_elementWiseMultiply<<<gridBlock,threadBlock>>>(A,B,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { cu_elementWiseMultiply<<<gridBlock,threadBlock>>>(A,B,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
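// The benchmark above times 1000 asynchronous kernel launches with steady_clock but does not
// synchronize before taking the end timestamp, so the measurement is dominated by launch overhead.
// A sketch of the same loop wrapped in a helper that waits for the device before stopping the
// clock; it reuses the cu_elementWiseMultiply kernel and arguments from the file above, and the
// helper name is illustrative.
static float timeKernelLoopMs(dim3 grid, dim3 block, float *A, float B, int n, int iters) {
    auto t0 = std::chrono::steady_clock::now();
    for (int i = 0; i < iters; ++i) {
        cu_elementWiseMultiply<<<grid, block>>>(A, B, n);
    }
    cudaDeviceSynchronize();   // ensure all queued kernels have finished before reading the clock
    auto t1 = std::chrono::steady_clock::now();
    return std::chrono::duration<float, std::milli>(t1 - t0).count();
}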
04eb037d0b6394b6724d1a53d0ae14cab84b4f78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void f1d3(float3 * __restrict__ ptr) { float3 v = ptr[threadIdx.x]; v.x += 1; v.y += 1; v.z += 1; ptr[threadIdx.x] = v; return; } // __global__ void f1(float4 * __restrict__ ptr) { // float4 v = ptr[threadIdx.x]; // v.x += 1; // v.y += 1; // v.z += 1; // v.w += 1; // ptr[threadIdx.x] = v; // return; // } int main() { // float4 *f1_ptr; // hipMalloc(&f1_ptr, sizeof(float)*128); // hipMemset(f1_ptr, 0, sizeof(float)*128); float3 *f1_ptr; hipMalloc(&f1_ptr, sizeof(float)*96); hipMemset(f1_ptr, 0, sizeof(float)*96); // global fuc // Timer t1, t2; // t1.Start(); // f1<<<1,32>>>(f1_ptr); hipLaunchKernelGGL(( f1d3), dim3(1),dim3(32), 0, 0, f1_ptr); // t1.Pause(); // printf_timer(t1); hipFree(f1_ptr); return 0; }
04eb037d0b6394b6724d1a53d0ae14cab84b4f78.cu
__global__ void f1d3(float3 * __restrict__ ptr) { float3 v = ptr[threadIdx.x]; v.x += 1; v.y += 1; v.z += 1; ptr[threadIdx.x] = v; return; } // __global__ void f1(float4 * __restrict__ ptr) { // float4 v = ptr[threadIdx.x]; // v.x += 1; // v.y += 1; // v.z += 1; // v.w += 1; // ptr[threadIdx.x] = v; // return; // } int main() { // float4 *f1_ptr; // cudaMalloc(&f1_ptr, sizeof(float)*128); // cudaMemset(f1_ptr, 0, sizeof(float)*128); float3 *f1_ptr; cudaMalloc(&f1_ptr, sizeof(float)*96); cudaMemset(f1_ptr, 0, sizeof(float)*96); // global fuc // Timer t1, t2; // t1.Start(); // f1<<<1,32>>>(f1_ptr); f1d3<<<1,32>>>(f1_ptr); // t1.Pause(); // printf_timer(t1); cudaFree(f1_ptr); return 0; }
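// The file above exercises a float3 (12-byte) element; the commented-out variant uses float4
// (16 bytes), which can be fetched as a single vectorized 16-byte load while a float3 access is
// split into smaller transactions. A tiny host-side check of the element sizes and the buffer each
// variant implies for 32 threads; the function name is illustrative.
#include <cstdio>
static void printElementSizes() {
    std::printf("float3: %zu bytes, %zu bytes for 32 threads\n",
                sizeof(float3), 32 * sizeof(float3));   // 12 and 384 (the file allocates 96 floats)
    std::printf("float4: %zu bytes, %zu bytes for 32 threads\n",
                sizeof(float4), 32 * sizeof(float4));   // 16 and 512 (the commented path allocates 128 floats)
}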
6aa3cc2fada180cff7581885fdb4c4c73f91a56e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <ops/specials_cuda.h> ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> SD_KERNEL void execOesTadKernelKey(void *vx, sd::LongType const *xShapeInfo, void *vy, sd::LongType const *yShapeInfo, int *dimension, int dimensionLength, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, bool descending) { auto x = static_cast<X *>(vx); auto y = static_cast<Y *>(vy); __shared__ int xLength; __shared__ int xTadLength; __shared__ int numTads; if (threadIdx.x == 0) { xLength = shape::length(xShapeInfo); xTadLength = shape::length(tadShapeInfo); numTads = xLength / xTadLength; } __syncthreads(); for (int r = blockIdx.x; r < numTads; r += gridDim.x) { auto dx = x + tadOffsets[r]; auto dy = y + tadOffsets[r]; // this is general loop, we go uncached int iterations = xTadLength; for (int i = 0; i < iterations; i++) { if (i % 2 == 0) { for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) { auto top = 2 * tid + 1; if (top < xTadLength) { auto t0 = shape::getIndexOffset(top - 1, tadShapeInfo); auto t1 = shape::getIndexOffset(top, tadShapeInfo); if (!descending == (dx[t0] > dx[t1])) { X dt0 = dx[t0]; dx[t0] = dx[t1]; dx[t1] = dt0; Y dy0 = dy[t0]; dy[t0] = dy[t1]; dy[t1] = dy0; } } } } else { for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) { auto top = 2 * tid + 2; if (top < xTadLength) { auto t0 = shape::getIndexOffset(top - 1, tadShapeInfo); auto t1 = shape::getIndexOffset(top, tadShapeInfo); if (!descending == (dx[t0] > dx[t1])) { X dt0 = dx[t0]; dx[t0] = dx[t1]; dx[t1] = dt0; Y dy0 = dy[t0]; dy[t0] = dy[t1]; dy[t1] = dy0; } } } } __syncthreads(); } } } ////////////////////////////////////////////////////////////////////////// template <typename T> SD_KERNEL void execOesTadKernel(void *vx, sd::LongType const *xShapeInfo, int *dimension, int dimensionLength, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, bool descending) { auto x = static_cast<T *>(vx); const int sharedSize = 32768; __shared__ int xLength; __shared__ int xTadLength; __shared__ int numTads; __shared__ T *shmem; __shared__ bool cached; if (threadIdx.x == 0) { xLength = shape::length(xShapeInfo); xTadLength = shape::length(tadShapeInfo); numTads = xLength / xTadLength; extern __shared__ unsigned char shrd[]; shmem = (T *)shrd; cached = xTadLength <= (sharedSize / sizeof(T)); } __syncthreads(); for (int r = blockIdx.x; r < numTads; r += gridDim.x) { auto dx = x + tadOffsets[r]; // this is general loop, we go uncached int iterations = xTadLength; if (cached) { for (int tid = threadIdx.x; tid < 
xTadLength; tid += blockDim.x) { auto t0 = shape::getIndexOffset(tid, tadShapeInfo); shmem[tid] = dx[t0]; } __syncthreads(); dx = shmem; } for (int i = 0; i < iterations; i++) { if (i % 2 == 0) { for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) { auto top = 2 * tid + 1; if (top < xTadLength) { auto t0 = cached ? top - 1 : shape::getIndexOffset(top - 1, tadShapeInfo); auto t1 = cached ? top : shape::getIndexOffset(top, tadShapeInfo); if (!descending == (dx[t0] > dx[t1])) { T dt0 = dx[t0]; dx[t0] = dx[t1]; dx[t1] = dt0; } } } } else { for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) { auto top = 2 * tid + 2; if (top < xTadLength) { auto t0 = cached ? top - 1 : shape::getIndexOffset(top - 1, tadShapeInfo); auto t1 = cached ? top : shape::getIndexOffset(top, tadShapeInfo); if (!descending == (dx[t0] > dx[t1])) { T dt0 = dx[t0]; dx[t0] = dx[t1]; dx[t1] = dt0; } } } } __syncthreads(); } if (cached) { dx = x + tadOffsets[r]; for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) { auto t0 = shape::getIndexOffset(tid, tadShapeInfo); dx[t0] = shmem[tid]; } } } } ////////////////////////////////////////////////////////////////////////// template <typename T> SD_HOST void oesTadGeneric(dim3 &launchDims, hipStream_t *stream, void *vx, sd::LongType const *xShapeInfo, int *dimension, int dimensionLength, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, bool descending) { hipLaunchKernelGGL(( execOesTadKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending); } template <typename X, typename Y> SD_HOST void oesTadGenericKey(dim3 &launchDims, hipStream_t *stream, void *vx, sd::LongType const *xShapeInfo, void *vy, sd::LongType const *yShapeInfo, int *dimension, int dimensionLength, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, bool descending) { hipLaunchKernelGGL(( execOesTadKernelKey<X, Y>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, vy, yShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending); } BUILD_SINGLE_TEMPLATE(template void oesTadGeneric, (dim3 & launchDims, hipStream_t *stream, void *vx, sd::LongType const *xShapeInfo, int *dimension, int dimensionLength, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, bool descending), SD_COMMON_TYPES); BUILD_DOUBLE_TEMPLATE(template void oesTadGenericKey, (dim3 & launchDims, hipStream_t *stream, void *vx, sd::LongType const *xShapeInfo, void *vy, sd::LongType const *yShapeInfo, int *dimension, int dimensionLength, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, bool descending), SD_COMMON_TYPES, SD_COMMON_TYPES);
6aa3cc2fada180cff7581885fdb4c4c73f91a56e.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <ops/specials_cuda.h> ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> SD_KERNEL void execOesTadKernelKey(void *vx, sd::LongType const *xShapeInfo, void *vy, sd::LongType const *yShapeInfo, int *dimension, int dimensionLength, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, bool descending) { auto x = static_cast<X *>(vx); auto y = static_cast<Y *>(vy); __shared__ int xLength; __shared__ int xTadLength; __shared__ int numTads; if (threadIdx.x == 0) { xLength = shape::length(xShapeInfo); xTadLength = shape::length(tadShapeInfo); numTads = xLength / xTadLength; } __syncthreads(); for (int r = blockIdx.x; r < numTads; r += gridDim.x) { auto dx = x + tadOffsets[r]; auto dy = y + tadOffsets[r]; // this is general loop, we go uncached int iterations = xTadLength; for (int i = 0; i < iterations; i++) { if (i % 2 == 0) { for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) { auto top = 2 * tid + 1; if (top < xTadLength) { auto t0 = shape::getIndexOffset(top - 1, tadShapeInfo); auto t1 = shape::getIndexOffset(top, tadShapeInfo); if (!descending == (dx[t0] > dx[t1])) { X dt0 = dx[t0]; dx[t0] = dx[t1]; dx[t1] = dt0; Y dy0 = dy[t0]; dy[t0] = dy[t1]; dy[t1] = dy0; } } } } else { for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) { auto top = 2 * tid + 2; if (top < xTadLength) { auto t0 = shape::getIndexOffset(top - 1, tadShapeInfo); auto t1 = shape::getIndexOffset(top, tadShapeInfo); if (!descending == (dx[t0] > dx[t1])) { X dt0 = dx[t0]; dx[t0] = dx[t1]; dx[t1] = dt0; Y dy0 = dy[t0]; dy[t0] = dy[t1]; dy[t1] = dy0; } } } } __syncthreads(); } } } ////////////////////////////////////////////////////////////////////////// template <typename T> SD_KERNEL void execOesTadKernel(void *vx, sd::LongType const *xShapeInfo, int *dimension, int dimensionLength, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, bool descending) { auto x = static_cast<T *>(vx); const int sharedSize = 32768; __shared__ int xLength; __shared__ int xTadLength; __shared__ int numTads; __shared__ T *shmem; __shared__ bool cached; if (threadIdx.x == 0) { xLength = shape::length(xShapeInfo); xTadLength = shape::length(tadShapeInfo); numTads = xLength / xTadLength; extern __shared__ unsigned char shrd[]; shmem = (T *)shrd; cached = xTadLength <= (sharedSize / sizeof(T)); } __syncthreads(); for (int r = blockIdx.x; r < numTads; r += gridDim.x) { auto dx = x + tadOffsets[r]; // this is general loop, we go uncached int iterations = xTadLength; if (cached) { for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) { auto t0 = shape::getIndexOffset(tid, tadShapeInfo); 
shmem[tid] = dx[t0]; } __syncthreads(); dx = shmem; } for (int i = 0; i < iterations; i++) { if (i % 2 == 0) { for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) { auto top = 2 * tid + 1; if (top < xTadLength) { auto t0 = cached ? top - 1 : shape::getIndexOffset(top - 1, tadShapeInfo); auto t1 = cached ? top : shape::getIndexOffset(top, tadShapeInfo); if (!descending == (dx[t0] > dx[t1])) { T dt0 = dx[t0]; dx[t0] = dx[t1]; dx[t1] = dt0; } } } } else { for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) { auto top = 2 * tid + 2; if (top < xTadLength) { auto t0 = cached ? top - 1 : shape::getIndexOffset(top - 1, tadShapeInfo); auto t1 = cached ? top : shape::getIndexOffset(top, tadShapeInfo); if (!descending == (dx[t0] > dx[t1])) { T dt0 = dx[t0]; dx[t0] = dx[t1]; dx[t1] = dt0; } } } } __syncthreads(); } if (cached) { dx = x + tadOffsets[r]; for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) { auto t0 = shape::getIndexOffset(tid, tadShapeInfo); dx[t0] = shmem[tid]; } } } } ////////////////////////////////////////////////////////////////////////// template <typename T> SD_HOST void oesTadGeneric(dim3 &launchDims, cudaStream_t *stream, void *vx, sd::LongType const *xShapeInfo, int *dimension, int dimensionLength, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, bool descending) { execOesTadKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending); } template <typename X, typename Y> SD_HOST void oesTadGenericKey(dim3 &launchDims, cudaStream_t *stream, void *vx, sd::LongType const *xShapeInfo, void *vy, sd::LongType const *yShapeInfo, int *dimension, int dimensionLength, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, bool descending) { execOesTadKernelKey<X, Y><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>( vx, xShapeInfo, vy, yShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending); } BUILD_SINGLE_TEMPLATE(template void oesTadGeneric, (dim3 & launchDims, cudaStream_t *stream, void *vx, sd::LongType const *xShapeInfo, int *dimension, int dimensionLength, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, bool descending), SD_COMMON_TYPES); BUILD_DOUBLE_TEMPLATE(template void oesTadGenericKey, (dim3 & launchDims, cudaStream_t *stream, void *vx, sd::LongType const *xShapeInfo, void *vy, sd::LongType const *yShapeInfo, int *dimension, int dimensionLength, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, bool descending), SD_COMMON_TYPES, SD_COMMON_TYPES);
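// oesTadGeneric above is the host-side entry point that selects the template instantiation and
// forwards the launch configuration. A hedged sketch of a call site: the dim3 values and the
// float TAD arguments are illustrative placeholders, not values used by the library itself.
void sortTadsExample(cudaStream_t *stream, void *dX, sd::LongType const *xShapeInfo,
                     int *dimension, int dimensionLength,
                     sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets) {
    dim3 launchDims(256, 256, 32768);   // blocks, threads per block, shared-memory bytes (matches sharedSize in the kernel)
    oesTadGeneric<float>(launchDims, stream, dX, xShapeInfo, dimension, dimensionLength,
                         tadShapeInfo, tadOffsets, /*descending=*/false);
}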
7343fffeb98a4a6b5ef779256eb4b5643bddf75a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdint> #include <cassert> __global__ void testPrefixScan(uint32_t size) { __shared__ uint16_t c[1024]; auto first = threadIdx.x; for (auto i=first; i<size; i+=blockDim.x) c[i]=1; __syncthreads(); unsigned mask = __ballot_sync(0xffffffff,first<size); // unsigned mask = 0xffffffff; #ifdef NOLOOP auto i=first; #else for (auto i=first; i<size; i+=blockDim.x) #endif { auto x = c[i]; auto laneId = threadIdx.x & 0x1f; #pragma unroll for( int offset = 1 ; offset < 32 ; offset <<= 1 ) { auto y = __shfl_up_sync(mask,x, offset); if(laneId >= offset) x += y; } c[i] = x; mask = __ballot_sync(mask,i+blockDim.x<size); } __syncthreads(); } #include<iostream> int main() { hipLaunchKernelGGL(( testPrefixScan), dim3(1),dim3(32), 0, 0, 45); hipDeviceSynchronize(); return 0; }
7343fffeb98a4a6b5ef779256eb4b5643bddf75a.cu
#include <cstdint> #include <cassert> __global__ void testPrefixScan(uint32_t size) { __shared__ uint16_t c[1024]; auto first = threadIdx.x; for (auto i=first; i<size; i+=blockDim.x) c[i]=1; __syncthreads(); unsigned mask = __ballot_sync(0xffffffff,first<size); // unsigned mask = 0xffffffff; #ifdef NOLOOP auto i=first; #else for (auto i=first; i<size; i+=blockDim.x) #endif { auto x = c[i]; auto laneId = threadIdx.x & 0x1f; #pragma unroll for( int offset = 1 ; offset < 32 ; offset <<= 1 ) { auto y = __shfl_up_sync(mask,x, offset); if(laneId >= offset) x += y; } c[i] = x; mask = __ballot_sync(mask,i+blockDim.x<size); } __syncthreads(); } #include<iostream> int main() { testPrefixScan<<<1,32>>>(45); cudaDeviceSynchronize(); return 0; }
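// testPrefixScan above builds a warp-level inclusive scan of `size` ones with __shfl_up_sync, so
// after the loop c[i] should hold (i % 32) + 1 within each 32-wide segment. A host reference of
// that expectation for checking the device result; the function name is illustrative.
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>
static std::vector<uint16_t> referenceWarpScan(uint32_t size) {
    std::vector<uint16_t> c(size, 1);
    for (uint32_t base = 0; base < size; base += 32) {   // scan each 32-element warp chunk separately
        uint32_t end = std::min<uint32_t>(base + 32, size);
        std::partial_sum(c.begin() + base, c.begin() + end, c.begin() + base);
    }
    return c;
}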
5f91e8a7d9e6b517ccd2142094a4e8e379cd5b45.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s #include "Inputs/cuda.h" __global__ void k1() {} template<int ...Dimensions> void k1Wrapper() { void (*f)() = [] {hipLaunchKernelGGL(( k1), dim3(Dimensions), dim3(Dimensions), 0, 0, ); }; // expected-error {{initializer contains unexpanded parameter pack 'Dimensions'}} void (*g[])() = { [] {hipLaunchKernelGGL(( k1), dim3(Dimensions), dim3(Dimensions), 0, 0, ); } ... }; // ok }
5f91e8a7d9e6b517ccd2142094a4e8e379cd5b45.cu
// RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s #include "Inputs/cuda.h" __global__ void k1() {} template<int ...Dimensions> void k1Wrapper() { void (*f)() = [] { k1<<<Dimensions, Dimensions>>>(); }; // expected-error {{initializer contains unexpanded parameter pack 'Dimensions'}} void (*g[])() = { [] { k1<<<Dimensions, Dimensions>>>(); } ... }; // ok }
574acad0b6cfe5914478ec121c810e5bc44e64df.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2011-2014, The University of Oxford * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of the University of Oxford nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "sky/oskar_scale_flux_with_frequency_cuda.h" #include "sky/oskar_scale_flux_with_frequency_inline.h" /* Kernels. ================================================================ */ /* Single precision. */ __global__ void oskar_scale_flux_with_frequency_cudak_f(const int num_sources, const float frequency, float* restrict I, float* restrict Q, float* restrict U, float* restrict V, float* restrict ref_freq, const float* restrict sp_index, const float* restrict rm) { /* Get source index and check bounds. */ const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num_sources) return; oskar_scale_flux_with_frequency_inline_f(frequency, &I[i], &Q[i], &U[i], &V[i], &ref_freq[i], sp_index[i], rm[i]); } /* Double precision. */ __global__ void oskar_scale_flux_with_frequency_cudak_d(const int num_sources, const double frequency, double* restrict I, double* restrict Q, double* restrict U, double* restrict V, double* restrict ref_freq, const double* restrict sp_index, const double* restrict rm) { /* Get source index and check bounds. */ const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num_sources) return; oskar_scale_flux_with_frequency_inline_d(frequency, &I[i], &Q[i], &U[i], &V[i], &ref_freq[i], sp_index[i], rm[i]); } #ifdef __cplusplus extern "C" { #endif /* Kernel wrappers. ======================================================== */ /* Single precision. */ void oskar_scale_flux_with_frequency_cuda_f(int num_sources, float frequency, float* d_I, float* d_Q, float* d_U, float* d_V, float* d_ref_freq, const float* d_sp_index, const float* d_rm) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_scale_flux_with_frequency_cudak_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (num_sources, frequency, d_I, d_Q, d_U, d_V, d_ref_freq, d_sp_index, d_rm); } /* Double precision. 
*/ void oskar_scale_flux_with_frequency_cuda_d(int num_sources, double frequency, double* d_I, double* d_Q, double* d_U, double* d_V, double* d_ref_freq, const double* d_sp_index, const double* d_rm) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_scale_flux_with_frequency_cudak_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (num_sources, frequency, d_I, d_Q, d_U, d_V, d_ref_freq, d_sp_index, d_rm); } #ifdef __cplusplus } #endif
574acad0b6cfe5914478ec121c810e5bc44e64df.cu
/* * Copyright (c) 2011-2014, The University of Oxford * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of the University of Oxford nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "sky/oskar_scale_flux_with_frequency_cuda.h" #include "sky/oskar_scale_flux_with_frequency_inline.h" /* Kernels. ================================================================ */ /* Single precision. */ __global__ void oskar_scale_flux_with_frequency_cudak_f(const int num_sources, const float frequency, float* restrict I, float* restrict Q, float* restrict U, float* restrict V, float* restrict ref_freq, const float* restrict sp_index, const float* restrict rm) { /* Get source index and check bounds. */ const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num_sources) return; oskar_scale_flux_with_frequency_inline_f(frequency, &I[i], &Q[i], &U[i], &V[i], &ref_freq[i], sp_index[i], rm[i]); } /* Double precision. */ __global__ void oskar_scale_flux_with_frequency_cudak_d(const int num_sources, const double frequency, double* restrict I, double* restrict Q, double* restrict U, double* restrict V, double* restrict ref_freq, const double* restrict sp_index, const double* restrict rm) { /* Get source index and check bounds. */ const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num_sources) return; oskar_scale_flux_with_frequency_inline_d(frequency, &I[i], &Q[i], &U[i], &V[i], &ref_freq[i], sp_index[i], rm[i]); } #ifdef __cplusplus extern "C" { #endif /* Kernel wrappers. ======================================================== */ /* Single precision. */ void oskar_scale_flux_with_frequency_cuda_f(int num_sources, float frequency, float* d_I, float* d_Q, float* d_U, float* d_V, float* d_ref_freq, const float* d_sp_index, const float* d_rm) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_scale_flux_with_frequency_cudak_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (num_sources, frequency, d_I, d_Q, d_U, d_V, d_ref_freq, d_sp_index, d_rm); } /* Double precision. 
*/ void oskar_scale_flux_with_frequency_cuda_d(int num_sources, double frequency, double* d_I, double* d_Q, double* d_U, double* d_V, double* d_ref_freq, const double* d_sp_index, const double* d_rm) { int num_blocks, num_threads = 256; num_blocks = (num_sources + num_threads - 1) / num_threads; oskar_scale_flux_with_frequency_cudak_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (num_sources, frequency, d_I, d_Q, d_U, d_V, d_ref_freq, d_sp_index, d_rm); } #ifdef __cplusplus } #endif
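// Both wrappers above size their grids with the usual ceiling division before launching through
// the OSKAR_CUDAK_CONF macro, which presumably expands to the <<<blocks, threads>>> configuration.
// A one-line helper expressing the same grid-sizing idea; the name is illustrative.
static inline int ceil_div_blocks(int num_sources, int num_threads) {
    return (num_sources + num_threads - 1) / num_threads;   // e.g. 1000 sources, 256 threads -> 4 blocks
}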
eb0a4297462d0abddd4eab7e232daf25e7bc4aff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #section support_code_apply // ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include <cfloat> using std::max; using std::min; #define Dtype float // The following chunks are borrowed from Caffe. ------------------------------ // CUDA: various checks for different function calls. #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ if (error != hipSuccess) { \ return 1; \ } \ } while (0) // CUDA: grid stride looping #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) // CUDA: check for error after kernel execution and exit loudly if there is one. #define CUDA_POST_KERNEL_CHECK CUDA_CHECK(hipPeekAtLastError()) // CUDA: use 512 threads per block const int CAFFE_CUDA_NUM_THREADS = 512; // CUDA: number of blocks for threads. inline int CAFFE_GET_BLOCKS(const int N) { return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; } // ---------------------------------------------------------------------------- __global__ void APPLY_SPECIFIC(ROIAlignForward)( const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, Dtype* argmax_data_x, Dtype* argmax_data_y) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = bottom_rois[3] * spatial_scale; Dtype roi_end_h = bottom_rois[4] * spatial_scale; Dtype roi_width = roi_end_w - roi_start_w + 1; Dtype roi_height = roi_end_h - roi_start_h + 1; Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); Dtype hstart = static_cast<Dtype>(ph) * bin_size_h; Dtype wstart = static_cast<Dtype>(pw) * bin_size_w; Dtype hend = static_cast<Dtype>(ph + 1) * bin_size_h; Dtype wend = static_cast<Dtype>(pw + 1) * bin_size_w; // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0.), static_cast<float>(height)); hend = min(max(hend + roi_start_h, 0.), static_cast<float>(height)); wstart = min(max(wstart + roi_start_w, 0.), static_cast<float>(width)); wend = min(max(wend + roi_start_w, 0.), static_cast<float>(width)); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd Dtype maxidx_x = -1; Dtype maxidx_y = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (Dtype h = hstart; h < hend; h += 1.) { for (Dtype w = wstart; w < wend; w += 1.) 
{ // Selecting four regular locations for bilinear interpolation int x_left = floor(w); int y_bottom = floor(h); int x_right = x_left + 1; int y_top = y_bottom + 1; int top_left_index = y_top * width + x_left; int top_right_index = y_top * width + x_right; int bottom_left_index = y_bottom * width + x_left; int bottom_right_index = y_bottom * width + x_right; bool is_top_left_in = x_left >= 0 && x_left <= width - 1 && y_top >= 0 && y_top <= height - 1; bool is_top_right_in = x_right >= 0 && x_right <= width - 1 && y_top >= 0 && y_top <= height - 1; bool is_bottom_left_in = x_left >= 0 && x_left <= width - 1 && y_bottom >= 0 && y_bottom <= height - 1; bool is_bottom_right_in = x_right >= 0 && x_right <= width - 1 && y_bottom >= 0 && y_bottom <= height - 1; Dtype val = 0; if (is_top_left_in) val += (1 - w + x_left) * (h - y_bottom) * bottom_data[top_left_index]; if (is_top_right_in) val += (w - x_left) * (h - y_bottom) * bottom_data[top_right_index]; if (is_bottom_left_in) val += (1 - w + x_left) * (1 - h + y_bottom) * bottom_data[bottom_left_index]; if (is_bottom_right_in) val += (w - x_left) * (1 - h + y_bottom) * bottom_data[bottom_right_index]; if (val > maxval) { maxval = val; maxidx_x = w; maxidx_y = h; } } } top_data[index] = maxval; argmax_data_x[index] = maxidx_x; argmax_data_y[index] = maxidx_y; } } __global__ void APPLY_SPECIFIC(ROIAlignBackward)( const int nthreads, const Dtype* top_diff, const Dtype* argmax_data_x, const Dtype* argmax_data_y, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } // And it assumes that we don't have any negative offset of course int roi_start_w = floor(offset_bottom_rois[1] * spatial_scale); int roi_start_h = floor(offset_bottom_rois[2] * spatial_scale); int roi_end_w = ceil(offset_bottom_rois[3] * spatial_scale); int roi_end_h = ceil(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const Dtype* offset_argmax_data_x = argmax_data_x + offset; const Dtype* offset_argmax_data_y = argmax_data_y + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit Dtype roi_width = roi_end_w - roi_start_w + 1; Dtype roi_height = roi_end_h - roi_start_h + 1; Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / 
bin_size_w); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); for (int ph = phstart; ph <= phend; ++ph) { for (int pw = pwstart; pw <= pwend; ++pw) { int index = ph * pooled_width + pw; Dtype max_x = offset_argmax_data_x[index]; Dtype max_y = offset_argmax_data_y[index]; int x_left = floor(max_x); int x_right = x_left + 1; int y_bottom = floor(max_y); int y_top = y_bottom + 1; if (x_left == w && y_top == h) gradient += (1 - max_x + x_left) * (1 - y_top + max_y) * offset_top_diff[index]; else if (x_left == w && y_bottom == h) gradient += (1 - max_x + x_left) * (1 - max_y + y_bottom) * offset_top_diff[index]; else if (x_right == w && y_top == h) gradient += (1 - x_right + max_x) * (1 - y_top + max_y) * offset_top_diff[index]; else if (x_right == w && y_bottom == h) gradient += (1 - x_right + max_x) * (1 - max_y + y_bottom) * offset_top_diff[index]; } } } bottom_diff[index] = gradient; } } int APPLY_SPECIFIC(Forward_gpu)(CudaNdarray* data, CudaNdarray* rois, CudaNdarray** out, CudaNdarray** argmaxes_x, CudaNdarray** argmaxes_y) { int batch_size = CudaNdarray_DIMS(rois)[0]; int channels = CudaNdarray_DIMS(data)[1]; int height = CudaNdarray_DIMS(data)[2]; int width = CudaNdarray_DIMS(data)[3]; // Prepare outputs. int dims[] = {0, 0, 0, 0}; dims[0] = batch_size; dims[1] = channels; dims[2] = POOLED_HEIGHT; dims[3] = POOLED_WIDTH; int count = batch_size * channels * POOLED_HEIGHT * POOLED_WIDTH; CudaNdarray_prep_output(out, 4, dims); CudaNdarray_prep_output(argmaxes_x, 4, dims); CudaNdarray_prep_output(argmaxes_y, 4, dims); hipLaunchKernelGGL(( APPLY_SPECIFIC(ROIAlignForward)) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, data->devdata, SPATIAL_SCALE, channels, height, width, POOLED_HEIGHT, POOLED_WIDTH, rois->devdata, (*out)->devdata, (*argmaxes_x)->devdata, (*argmaxes_y)->devdata); CUDA_POST_KERNEL_CHECK; return 0; } int APPLY_SPECIFIC(Backward_gpu)(CudaNdarray* data, CudaNdarray* rois, CudaNdarray* argmaxes_x, CudaNdarray* argmaxes_y, CudaNdarray* out_grad, CudaNdarray** data_grad) { int count = CudaNdarray_SIZE(data); int batch_size = CudaNdarray_DIMS(rois)[0]; int channels = CudaNdarray_DIMS(data)[1]; int height = CudaNdarray_DIMS(data)[2]; int width = CudaNdarray_DIMS(data)[3]; // Prepare data grad. CudaNdarray_prep_output(data_grad, 4, CudaNdarray_DIMS(data)); hipMemset((*data_grad)->devdata, Dtype(0.), count); hipLaunchKernelGGL(( APPLY_SPECIFIC(ROIAlignBackward)) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, out_grad->devdata, argmaxes_x->devdata, argmaxes_y->devdata, batch_size, SPATIAL_SCALE, channels, height, width, POOLED_HEIGHT, POOLED_WIDTH, (*data_grad)->devdata, rois->devdata); CUDA_POST_KERNEL_CHECK; return 0; }
eb0a4297462d0abddd4eab7e232daf25e7bc4aff.cu
#section support_code_apply // ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include <cfloat> using std::max; using std::min; #define Dtype float // The following chunks are borrowed from Caffe. ------------------------------ // CUDA: various checks for different function calls. #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ return 1; \ } \ } while (0) // CUDA: grid stride looping #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) // CUDA: check for error after kernel execution and exit loudly if there is one. #define CUDA_POST_KERNEL_CHECK CUDA_CHECK(cudaPeekAtLastError()) // CUDA: use 512 threads per block const int CAFFE_CUDA_NUM_THREADS = 512; // CUDA: number of blocks for threads. inline int CAFFE_GET_BLOCKS(const int N) { return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; } // ---------------------------------------------------------------------------- __global__ void APPLY_SPECIFIC(ROIAlignForward)( const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, Dtype* argmax_data_x, Dtype* argmax_data_y) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = bottom_rois[3] * spatial_scale; Dtype roi_end_h = bottom_rois[4] * spatial_scale; Dtype roi_width = roi_end_w - roi_start_w + 1; Dtype roi_height = roi_end_h - roi_start_h + 1; Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); Dtype hstart = static_cast<Dtype>(ph) * bin_size_h; Dtype wstart = static_cast<Dtype>(pw) * bin_size_w; Dtype hend = static_cast<Dtype>(ph + 1) * bin_size_h; Dtype wend = static_cast<Dtype>(pw + 1) * bin_size_w; // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0.), static_cast<float>(height)); hend = min(max(hend + roi_start_h, 0.), static_cast<float>(height)); wstart = min(max(wstart + roi_start_w, 0.), static_cast<float>(width)); wend = min(max(wend + roi_start_w, 0.), static_cast<float>(width)); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd Dtype maxidx_x = -1; Dtype maxidx_y = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (Dtype h = hstart; h < hend; h += 1.) { for (Dtype w = wstart; w < wend; w += 1.) 
{ // Selecting four regular locations for bilinear interpolation int x_left = floor(w); int y_bottom = floor(h); int x_right = x_left + 1; int y_top = y_bottom + 1; int top_left_index = y_top * width + x_left; int top_right_index = y_top * width + x_right; int bottom_left_index = y_bottom * width + x_left; int bottom_right_index = y_bottom * width + x_right; bool is_top_left_in = x_left >= 0 && x_left <= width - 1 && y_top >= 0 && y_top <= height - 1; bool is_top_right_in = x_right >= 0 && x_right <= width - 1 && y_top >= 0 && y_top <= height - 1; bool is_bottom_left_in = x_left >= 0 && x_left <= width - 1 && y_bottom >= 0 && y_bottom <= height - 1; bool is_bottom_right_in = x_right >= 0 && x_right <= width - 1 && y_bottom >= 0 && y_bottom <= height - 1; Dtype val = 0; if (is_top_left_in) val += (1 - w + x_left) * (h - y_bottom) * bottom_data[top_left_index]; if (is_top_right_in) val += (w - x_left) * (h - y_bottom) * bottom_data[top_right_index]; if (is_bottom_left_in) val += (1 - w + x_left) * (1 - h + y_bottom) * bottom_data[bottom_left_index]; if (is_bottom_right_in) val += (w - x_left) * (1 - h + y_bottom) * bottom_data[bottom_right_index]; if (val > maxval) { maxval = val; maxidx_x = w; maxidx_y = h; } } } top_data[index] = maxval; argmax_data_x[index] = maxidx_x; argmax_data_y[index] = maxidx_y; } } __global__ void APPLY_SPECIFIC(ROIAlignBackward)( const int nthreads, const Dtype* top_diff, const Dtype* argmax_data_x, const Dtype* argmax_data_y, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } // And it assumes that we don't have any negative offset of course int roi_start_w = floor(offset_bottom_rois[1] * spatial_scale); int roi_start_h = floor(offset_bottom_rois[2] * spatial_scale); int roi_end_w = ceil(offset_bottom_rois[3] * spatial_scale); int roi_end_h = ceil(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const Dtype* offset_argmax_data_x = argmax_data_x + offset; const Dtype* offset_argmax_data_y = argmax_data_y + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit Dtype roi_width = roi_end_w - roi_start_w + 1; Dtype roi_height = roi_end_h - roi_start_h + 1; Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / 
bin_size_w); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); for (int ph = phstart; ph <= phend; ++ph) { for (int pw = pwstart; pw <= pwend; ++pw) { int index = ph * pooled_width + pw; Dtype max_x = offset_argmax_data_x[index]; Dtype max_y = offset_argmax_data_y[index]; int x_left = floor(max_x); int x_right = x_left + 1; int y_bottom = floor(max_y); int y_top = y_bottom + 1; if (x_left == w && y_top == h) gradient += (1 - max_x + x_left) * (1 - y_top + max_y) * offset_top_diff[index]; else if (x_left == w && y_bottom == h) gradient += (1 - max_x + x_left) * (1 - max_y + y_bottom) * offset_top_diff[index]; else if (x_right == w && y_top == h) gradient += (1 - x_right + max_x) * (1 - y_top + max_y) * offset_top_diff[index]; else if (x_right == w && y_bottom == h) gradient += (1 - x_right + max_x) * (1 - max_y + y_bottom) * offset_top_diff[index]; } } } bottom_diff[index] = gradient; } } int APPLY_SPECIFIC(Forward_gpu)(CudaNdarray* data, CudaNdarray* rois, CudaNdarray** out, CudaNdarray** argmaxes_x, CudaNdarray** argmaxes_y) { int batch_size = CudaNdarray_DIMS(rois)[0]; int channels = CudaNdarray_DIMS(data)[1]; int height = CudaNdarray_DIMS(data)[2]; int width = CudaNdarray_DIMS(data)[3]; // Prepare outputs. int dims[] = {0, 0, 0, 0}; dims[0] = batch_size; dims[1] = channels; dims[2] = POOLED_HEIGHT; dims[3] = POOLED_WIDTH; int count = batch_size * channels * POOLED_HEIGHT * POOLED_WIDTH; CudaNdarray_prep_output(out, 4, dims); CudaNdarray_prep_output(argmaxes_x, 4, dims); CudaNdarray_prep_output(argmaxes_y, 4, dims); APPLY_SPECIFIC(ROIAlignForward) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, data->devdata, SPATIAL_SCALE, channels, height, width, POOLED_HEIGHT, POOLED_WIDTH, rois->devdata, (*out)->devdata, (*argmaxes_x)->devdata, (*argmaxes_y)->devdata); CUDA_POST_KERNEL_CHECK; return 0; } int APPLY_SPECIFIC(Backward_gpu)(CudaNdarray* data, CudaNdarray* rois, CudaNdarray* argmaxes_x, CudaNdarray* argmaxes_y, CudaNdarray* out_grad, CudaNdarray** data_grad) { int count = CudaNdarray_SIZE(data); int batch_size = CudaNdarray_DIMS(rois)[0]; int channels = CudaNdarray_DIMS(data)[1]; int height = CudaNdarray_DIMS(data)[2]; int width = CudaNdarray_DIMS(data)[3]; // Prepare data grad. CudaNdarray_prep_output(data_grad, 4, CudaNdarray_DIMS(data)); cudaMemset((*data_grad)->devdata, Dtype(0.), count); APPLY_SPECIFIC(ROIAlignBackward) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, out_grad->devdata, argmaxes_x->devdata, argmaxes_y->devdata, batch_size, SPATIAL_SCALE, channels, height, width, POOLED_HEIGHT, POOLED_WIDTH, (*data_grad)->devdata, rois->devdata); CUDA_POST_KERNEL_CHECK; return 0; }
c5690fb4341a4ea2b294db063bd3126aab74054b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2020 Zhixu Zhao * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "tl_tensor_internal_cuda.h" static __global__ void convert_kernel(void *src, void *dst, tl_dtype dtype_s, tl_dtype dtype_d, int block_size, int total) { int di = blockIdx.x * block_size + threadIdx.x; if (di >= total) return; double val_d; float val_f; int32_t val_i32; uint32_t val_u32; int16_t val_i16; uint16_t val_u16; int8_t val_i8; uint8_t val_u8; switch (dtype_d) { case TL_DOUBLE: switch (dtype_s) { case TL_DOUBLE: ((double *)dst)[di] = ((double *)src)[di]; break; case TL_FLOAT: ((double *)dst)[di] = (double)((float *)src)[di]; break; case TL_INT32: ((double *)dst)[di] = (double)((int32_t *)src)[di]; break; case TL_INT16: ((double *)dst)[di] = (double)((int16_t *)src)[di]; break; case TL_INT8: ((double *)dst)[di] = (double)((int8_t *)src)[di]; break; case TL_UINT32: ((double *)dst)[di] = (double)((uint32_t *)src)[di]; break; case TL_UINT16: ((double *)dst)[di] = (double)((uint16_t *)src)[di]; break; case TL_UINT8: ((double *)dst)[di] = (double)((uint8_t *)src)[di]; break; case TL_BOOL: ((double *)dst)[di] = (double)((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_FLOAT: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d >= FLT_MAX) ((float *)dst)[di] = FLT_MAX; else if (val_d <= -FLT_MAX) ((float *)dst)[di] = -FLT_MAX; else ((float *)dst)[di] = (float)val_d; break; case TL_FLOAT: ((float *)dst)[di] = ((float *)src)[di]; break; case TL_INT32: ((float *)dst)[di] = (float)((int32_t *)src)[di]; break; case TL_INT16: ((float *)dst)[di] = (float)((int16_t *)src)[di]; break; case TL_INT8: ((float *)dst)[di] = (float)((int8_t *)src)[di]; break; case TL_UINT32: ((float *)dst)[di] = (float)((uint32_t *)src)[di]; break; case TL_UINT16: ((float *)dst)[di] = (float)((uint16_t *)src)[di]; break; case TL_UINT8: ((float *)dst)[di] = (float)((uint8_t *)src)[di]; break; case TL_BOOL: ((float *)dst)[di] = (float)((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_INT32: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d >= INT32_MAX) ((int32_t *)dst)[di] = INT32_MAX; else if (val_d <= INT32_MIN) ((int32_t *)dst)[di] = INT32_MIN; else ((int32_t *)dst)[di] = (int32_t)val_d; break; case TL_FLOAT: val_f = ((float *)src)[di]; if (val_f >= INT32_MAX) ((int32_t *)dst)[di] = INT32_MAX; else if 
(val_f <= INT32_MIN) ((int32_t *)dst)[di] = INT32_MIN; else ((int32_t *)dst)[di] = (int32_t)val_f; break; case TL_INT32: ((int32_t *)dst)[di] = ((int32_t *)src)[di]; break; case TL_INT16: ((int32_t *)dst)[di] = (int32_t)((int16_t *)src)[di]; break; case TL_INT8: ((int32_t *)dst)[di] = (int32_t)((int8_t *)src)[di]; break; case TL_UINT32: val_u32 = ((uint32_t *)src)[di]; if (val_u32 >= INT32_MAX) ((int32_t *)dst)[di] = INT32_MAX; else ((int32_t *)dst)[di] = (int32_t)val_u32; break; case TL_UINT16: ((int32_t *)dst)[di] = (int32_t)((uint16_t *)src)[di]; break; case TL_UINT8: ((int32_t *)dst)[di] = (int32_t)((uint8_t *)src)[di]; break; case TL_BOOL: ((int32_t *)dst)[di] = (int32_t)((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_INT16: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d >= INT16_MAX) ((int16_t *)dst)[di] = INT16_MAX; else if (val_d <= INT16_MIN) ((int16_t *)dst)[di] = INT16_MIN; else ((int16_t *)dst)[di] = (int16_t)val_d; break; case TL_FLOAT: val_f = ((float *)src)[di]; if (val_f >= INT16_MAX) ((int16_t *)dst)[di] = INT16_MAX; else if (val_f <= INT16_MIN) ((int16_t *)dst)[di] = INT16_MIN; else ((int16_t *)dst)[di] = (int16_t)val_f; break; case TL_INT32: val_i32 = ((int32_t *)src)[di]; if (val_i32 >= INT16_MAX) ((int16_t *)dst)[di] = INT16_MAX; else if (val_i32 <= INT16_MIN) ((int16_t *)dst)[di] = INT16_MIN; else ((int16_t *)dst)[di] = (int16_t)val_i32; break; case TL_INT16: ((int16_t *)dst)[di] = ((int16_t *)src)[di]; break; case TL_INT8: ((int16_t *)dst)[di] = (int16_t)((int8_t *)src)[di]; break; case TL_UINT32: val_u32 = ((uint32_t *)src)[di]; if (val_u32 >= INT16_MAX) ((int16_t *)dst)[di] = INT16_MAX; else ((int16_t *)dst)[di] = (int16_t)val_u32; break; case TL_UINT16: val_u16 = ((uint16_t *)src)[di]; if (val_u16 >= INT16_MAX) ((int16_t *)dst)[di] = INT16_MAX; else ((int16_t *)dst)[di] = (int16_t)val_u16; break; case TL_UINT8: ((int16_t *)dst)[di] = (int16_t)((uint8_t *)src)[di]; break; case TL_BOOL: ((int16_t *)dst)[di] = (int16_t)((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_INT8: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d >= INT8_MAX) ((int8_t *)dst)[di] = INT8_MAX; else if (val_d <= INT8_MIN) ((int8_t *)dst)[di] = INT8_MIN; else ((int8_t *)dst)[di] = (int8_t)val_d; break; case TL_FLOAT: val_f = ((float *)src)[di]; if (val_f >= INT8_MAX) ((int8_t *)dst)[di] = INT8_MAX; else if (val_f <= INT8_MIN) ((int8_t *)dst)[di] = INT8_MIN; else ((int8_t *)dst)[di] = (int8_t)val_f; break; case TL_INT32: val_i32 = ((int32_t *)src)[di]; if (val_i32 >= INT8_MAX) ((int8_t *)dst)[di] = INT8_MAX; else if (val_i32 <= INT8_MIN) ((int8_t *)dst)[di] = INT8_MIN; else ((int8_t *)dst)[di] = (int8_t)val_i32; break; case TL_INT16: val_i16 = ((int16_t *)src)[di]; if (val_i16 >= INT8_MAX) ((int8_t *)dst)[di] = INT8_MAX; else if (val_i16 <= INT8_MIN) ((int8_t *)dst)[di] = INT8_MIN; else ((int8_t *)dst)[di] = (int8_t)val_i16; break; case TL_INT8: ((int8_t *)dst)[di] = ((int8_t *)src)[di]; break; case TL_UINT32: val_u32 = ((uint32_t *)src)[di]; if (val_u32 >= INT8_MAX) ((int8_t *)dst)[di] = INT8_MAX; else ((int8_t *)dst)[di] = (int8_t)val_u32; break; case TL_UINT16: val_u16 = ((uint16_t *)src)[di]; if (val_u16 >= INT8_MAX) ((int8_t *)dst)[di] = INT8_MAX; else ((int8_t *)dst)[di] = (int8_t)val_u16; break; case TL_UINT8: val_u8 = ((uint8_t *)src)[di]; if (val_u8 >= INT8_MAX) ((int8_t *)dst)[di] = INT8_MAX; else ((int8_t *)dst)[di] = (int8_t)val_u8; 
break; case TL_BOOL: ((int8_t *)dst)[di] = (int8_t)((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_UINT32: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d >= UINT32_MAX) ((uint32_t *)dst)[di] = UINT32_MAX; else if (val_d < 0) ((uint32_t *)dst)[di] = 0; else ((uint32_t *)dst)[di] = (uint32_t)val_d; break; case TL_FLOAT: val_f = ((float *)src)[di]; if (val_f >= UINT32_MAX) ((uint32_t *)dst)[di] = UINT32_MAX; else if (val_f < 0) ((uint32_t *)dst)[di] = 0; else ((uint32_t *)dst)[di] = (uint32_t)val_f; break; case TL_INT32: val_i32 = ((int32_t *)src)[di]; if (val_i32 >= 0) ((uint32_t *)dst)[di] = (uint32_t)val_i32; else ((uint32_t *)dst)[di] = 0; break; case TL_INT16: val_i16 = ((int16_t *)src)[di]; if (val_i16 >= 0) ((uint32_t *)dst)[di] = (uint32_t)val_i16; else ((uint32_t *)dst)[di] = 0; break; case TL_INT8: val_i8 = ((int8_t *)src)[di]; if (val_i8 >= 0) ((uint32_t *)dst)[di] = (uint32_t)val_i8; else ((uint32_t *)dst)[di] = 0; break; case TL_UINT32: ((uint32_t *)dst)[di] = ((uint32_t *)src)[di]; break; case TL_UINT16: ((uint32_t *)dst)[di] = (uint32_t)((uint16_t *)src)[di]; break; case TL_UINT8: ((uint32_t *)dst)[di] = (uint32_t)((uint8_t *)src)[di]; break; case TL_BOOL: ((uint32_t *)dst)[di] = (uint32_t)((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_UINT16: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d >= UINT16_MAX) ((uint16_t *)dst)[di] = UINT16_MAX; else if (val_d < 0) ((uint16_t *)dst)[di] = 0; else ((uint16_t *)dst)[di] = (uint16_t)val_d; break; case TL_FLOAT: val_f = ((float *)src)[di]; if (val_f >= UINT16_MAX) ((uint16_t *)dst)[di] = UINT16_MAX; else if (val_f < 0) ((uint16_t *)dst)[di] = 0; else ((uint16_t *)dst)[di] = (uint16_t)val_f; break; case TL_INT32: val_i32 = ((int32_t *)src)[di]; if (val_i32 >= UINT16_MAX) ((uint16_t *)dst)[di] = UINT16_MAX; else if (val_i32 < 0) ((uint16_t *)dst)[di] = 0; else ((uint16_t *)dst)[di] = (uint16_t)val_i32; break; case TL_INT16: val_i16 = ((int16_t *)src)[di]; if (val_i16 >= 0) ((uint16_t *)dst)[di] = (uint16_t)val_i16; else ((uint16_t *)dst)[di] = 0; break; case TL_INT8: val_i8 = ((int8_t *)src)[di]; if (val_i8 >= 0) ((uint16_t *)dst)[di] = (uint16_t)val_i8; else ((uint16_t *)dst)[di] = 0; break; case TL_UINT32: val_u32 = ((uint32_t *)src)[di]; if (val_u32 >= UINT16_MAX) ((uint16_t *)dst)[di] = UINT16_MAX; else ((uint16_t *)dst)[di] = (uint16_t)val_u32; break; case TL_UINT16: ((uint16_t *)dst)[di] = ((uint16_t *)src)[di]; break; case TL_UINT8: ((uint16_t *)dst)[di] = (uint16_t)((uint8_t *)src)[di]; break; case TL_BOOL: ((uint16_t *)dst)[di] = (uint16_t)((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_UINT8: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d >= UINT8_MAX) ((uint8_t *)dst)[di] = UINT8_MAX; else if (val_d < 0) ((uint8_t *)dst)[di] = 0; else ((uint8_t *)dst)[di] = (uint8_t)val_d; break; case TL_FLOAT: val_f = ((float *)src)[di]; if (val_f >= UINT8_MAX) ((uint8_t *)dst)[di] = UINT8_MAX; else if (val_f < 0) ((uint8_t *)dst)[di] = 0; else ((uint8_t *)dst)[di] = (uint8_t)val_f; break; case TL_INT32: val_i32 = ((int32_t *)src)[di]; if (val_i32 >= UINT8_MAX) ((uint8_t *)dst)[di] = UINT8_MAX; else if (val_i32 < 0) ((uint8_t *)dst)[di] = 0; else ((uint8_t *)dst)[di] = (uint8_t)val_i32; break; case TL_INT16: val_i16 = ((int16_t *)src)[di]; if (val_i16 >= UINT8_MAX) ((uint8_t *)dst)[di] = UINT8_MAX; else if 
(val_i16 < 0) ((uint8_t *)dst)[di] = 0; else ((uint8_t *)dst)[di] = (uint8_t)val_i16; break; case TL_INT8: val_i8 = ((int8_t *)src)[di]; if (val_i8 >= 0) ((uint8_t *)dst)[di] = (uint8_t)val_i8; else ((uint8_t *)dst)[di] = 0; break; case TL_UINT32: val_u32 = ((uint32_t *)src)[di]; if (val_u32 >= UINT8_MAX) ((uint8_t *)dst)[di] = UINT8_MAX; else ((uint8_t *)dst)[di] = (uint8_t)val_u32; break; case TL_UINT16: val_u16 = ((uint16_t *)src)[di]; if (val_u16 >= UINT8_MAX) ((uint8_t *)dst)[di] = UINT8_MAX; else ((uint8_t *)dst)[di] = (uint8_t)val_u16; break; case TL_UINT8: ((uint8_t *)dst)[di] = ((uint8_t *)src)[di]; break; case TL_BOOL: ((uint8_t *)dst)[di] = (uint8_t)((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_BOOL: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d > 0 || val_d < 0) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_FLOAT: val_f = ((float *)src)[di]; if (val_f > 0 || val_f < 0) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_INT32: val_i32 = ((int32_t *)src)[di]; if (val_i32) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_INT16: val_i16 = ((int16_t *)src)[di]; if (val_i16) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_INT8: val_i8 = ((int8_t *)src)[di]; if (val_i8) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_UINT32: val_u32 = ((uint32_t *)src)[di]; if (val_u32) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_UINT16: val_u16 = ((uint16_t *)src)[di]; if (val_u16) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_UINT8: val_u8 = ((uint8_t *)src)[di]; if (val_u8) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_BOOL: ((tl_bool_t *)dst)[di] = ((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; default: assert(0 && "unsupported tl_dtype"); break; } } TL_EXPORT tl_tensor *tl_tensor_convert_cuda(const tl_tensor *src, tl_tensor *dst, tl_dtype dtype_d) { tl_dtype dtype_s; int thread_num, block_num; assert(src && tl_is_device_mem(src->data)); if (dst) { assert(tl_is_device_mem(dst->data)); assert(tl_tensor_issameshape(src, dst)); assert(dst->dtype == dtype_d); } else { dst = tl_tensor_zeros_cuda(src->ndim, src->dims, dtype_d); } dtype_s = src->dtype; thread_num = dst->len; block_num = BLOCK_NUM(BLOCK_SIZE, thread_num); hipLaunchKernelGGL(( convert_kernel), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, src->data, dst->data, dtype_s, dtype_d, BLOCK_SIZE, thread_num); tl_cuda_device_sync(); return dst; }
c5690fb4341a4ea2b294db063bd3126aab74054b.cu
/* * Copyright (c) 2018-2020 Zhixu Zhao * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "tl_tensor_internal_cuda.h" static __global__ void convert_kernel(void *src, void *dst, tl_dtype dtype_s, tl_dtype dtype_d, int block_size, int total) { int di = blockIdx.x * block_size + threadIdx.x; if (di >= total) return; double val_d; float val_f; int32_t val_i32; uint32_t val_u32; int16_t val_i16; uint16_t val_u16; int8_t val_i8; uint8_t val_u8; switch (dtype_d) { case TL_DOUBLE: switch (dtype_s) { case TL_DOUBLE: ((double *)dst)[di] = ((double *)src)[di]; break; case TL_FLOAT: ((double *)dst)[di] = (double)((float *)src)[di]; break; case TL_INT32: ((double *)dst)[di] = (double)((int32_t *)src)[di]; break; case TL_INT16: ((double *)dst)[di] = (double)((int16_t *)src)[di]; break; case TL_INT8: ((double *)dst)[di] = (double)((int8_t *)src)[di]; break; case TL_UINT32: ((double *)dst)[di] = (double)((uint32_t *)src)[di]; break; case TL_UINT16: ((double *)dst)[di] = (double)((uint16_t *)src)[di]; break; case TL_UINT8: ((double *)dst)[di] = (double)((uint8_t *)src)[di]; break; case TL_BOOL: ((double *)dst)[di] = (double)((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_FLOAT: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d >= FLT_MAX) ((float *)dst)[di] = FLT_MAX; else if (val_d <= -FLT_MAX) ((float *)dst)[di] = -FLT_MAX; else ((float *)dst)[di] = (float)val_d; break; case TL_FLOAT: ((float *)dst)[di] = ((float *)src)[di]; break; case TL_INT32: ((float *)dst)[di] = (float)((int32_t *)src)[di]; break; case TL_INT16: ((float *)dst)[di] = (float)((int16_t *)src)[di]; break; case TL_INT8: ((float *)dst)[di] = (float)((int8_t *)src)[di]; break; case TL_UINT32: ((float *)dst)[di] = (float)((uint32_t *)src)[di]; break; case TL_UINT16: ((float *)dst)[di] = (float)((uint16_t *)src)[di]; break; case TL_UINT8: ((float *)dst)[di] = (float)((uint8_t *)src)[di]; break; case TL_BOOL: ((float *)dst)[di] = (float)((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_INT32: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d >= INT32_MAX) ((int32_t *)dst)[di] = INT32_MAX; else if (val_d <= INT32_MIN) ((int32_t *)dst)[di] = INT32_MIN; else ((int32_t *)dst)[di] = (int32_t)val_d; break; case TL_FLOAT: val_f = ((float *)src)[di]; if (val_f >= INT32_MAX) ((int32_t *)dst)[di] = INT32_MAX; else if (val_f <= INT32_MIN) ((int32_t *)dst)[di] = INT32_MIN; else ((int32_t *)dst)[di] = 
(int32_t)val_f; break; case TL_INT32: ((int32_t *)dst)[di] = ((int32_t *)src)[di]; break; case TL_INT16: ((int32_t *)dst)[di] = (int32_t)((int16_t *)src)[di]; break; case TL_INT8: ((int32_t *)dst)[di] = (int32_t)((int8_t *)src)[di]; break; case TL_UINT32: val_u32 = ((uint32_t *)src)[di]; if (val_u32 >= INT32_MAX) ((int32_t *)dst)[di] = INT32_MAX; else ((int32_t *)dst)[di] = (int32_t)val_u32; break; case TL_UINT16: ((int32_t *)dst)[di] = (int32_t)((uint16_t *)src)[di]; break; case TL_UINT8: ((int32_t *)dst)[di] = (int32_t)((uint8_t *)src)[di]; break; case TL_BOOL: ((int32_t *)dst)[di] = (int32_t)((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_INT16: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d >= INT16_MAX) ((int16_t *)dst)[di] = INT16_MAX; else if (val_d <= INT16_MIN) ((int16_t *)dst)[di] = INT16_MIN; else ((int16_t *)dst)[di] = (int16_t)val_d; break; case TL_FLOAT: val_f = ((float *)src)[di]; if (val_f >= INT16_MAX) ((int16_t *)dst)[di] = INT16_MAX; else if (val_f <= INT16_MIN) ((int16_t *)dst)[di] = INT16_MIN; else ((int16_t *)dst)[di] = (int16_t)val_f; break; case TL_INT32: val_i32 = ((int32_t *)src)[di]; if (val_i32 >= INT16_MAX) ((int16_t *)dst)[di] = INT16_MAX; else if (val_i32 <= INT16_MIN) ((int16_t *)dst)[di] = INT16_MIN; else ((int16_t *)dst)[di] = (int16_t)val_i32; break; case TL_INT16: ((int16_t *)dst)[di] = ((int16_t *)src)[di]; break; case TL_INT8: ((int16_t *)dst)[di] = (int16_t)((int8_t *)src)[di]; break; case TL_UINT32: val_u32 = ((uint32_t *)src)[di]; if (val_u32 >= INT16_MAX) ((int16_t *)dst)[di] = INT16_MAX; else ((int16_t *)dst)[di] = (int16_t)val_u32; break; case TL_UINT16: val_u16 = ((uint16_t *)src)[di]; if (val_u16 >= INT16_MAX) ((int16_t *)dst)[di] = INT16_MAX; else ((int16_t *)dst)[di] = (int16_t)val_u16; break; case TL_UINT8: ((int16_t *)dst)[di] = (int16_t)((uint8_t *)src)[di]; break; case TL_BOOL: ((int16_t *)dst)[di] = (int16_t)((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_INT8: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d >= INT8_MAX) ((int8_t *)dst)[di] = INT8_MAX; else if (val_d <= INT8_MIN) ((int8_t *)dst)[di] = INT8_MIN; else ((int8_t *)dst)[di] = (int8_t)val_d; break; case TL_FLOAT: val_f = ((float *)src)[di]; if (val_f >= INT8_MAX) ((int8_t *)dst)[di] = INT8_MAX; else if (val_f <= INT8_MIN) ((int8_t *)dst)[di] = INT8_MIN; else ((int8_t *)dst)[di] = (int8_t)val_f; break; case TL_INT32: val_i32 = ((int32_t *)src)[di]; if (val_i32 >= INT8_MAX) ((int8_t *)dst)[di] = INT8_MAX; else if (val_i32 <= INT8_MIN) ((int8_t *)dst)[di] = INT8_MIN; else ((int8_t *)dst)[di] = (int8_t)val_i32; break; case TL_INT16: val_i16 = ((int16_t *)src)[di]; if (val_i16 >= INT8_MAX) ((int8_t *)dst)[di] = INT8_MAX; else if (val_i16 <= INT8_MIN) ((int8_t *)dst)[di] = INT8_MIN; else ((int8_t *)dst)[di] = (int8_t)val_i16; break; case TL_INT8: ((int8_t *)dst)[di] = ((int8_t *)src)[di]; break; case TL_UINT32: val_u32 = ((uint32_t *)src)[di]; if (val_u32 >= INT8_MAX) ((int8_t *)dst)[di] = INT8_MAX; else ((int8_t *)dst)[di] = (int8_t)val_u32; break; case TL_UINT16: val_u16 = ((uint16_t *)src)[di]; if (val_u16 >= INT8_MAX) ((int8_t *)dst)[di] = INT8_MAX; else ((int8_t *)dst)[di] = (int8_t)val_u16; break; case TL_UINT8: val_u8 = ((uint8_t *)src)[di]; if (val_u8 >= INT8_MAX) ((int8_t *)dst)[di] = INT8_MAX; else ((int8_t *)dst)[di] = (int8_t)val_u8; break; case TL_BOOL: ((int8_t *)dst)[di] = (int8_t)((tl_bool_t *)src)[di]; break; 
default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_UINT32: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d >= UINT32_MAX) ((uint32_t *)dst)[di] = UINT32_MAX; else if (val_d < 0) ((uint32_t *)dst)[di] = 0; else ((uint32_t *)dst)[di] = (uint32_t)val_d; break; case TL_FLOAT: val_f = ((float *)src)[di]; if (val_f >= UINT32_MAX) ((uint32_t *)dst)[di] = UINT32_MAX; else if (val_f < 0) ((uint32_t *)dst)[di] = 0; else ((uint32_t *)dst)[di] = (uint32_t)val_f; break; case TL_INT32: val_i32 = ((int32_t *)src)[di]; if (val_i32 >= 0) ((uint32_t *)dst)[di] = (uint32_t)val_i32; else ((uint32_t *)dst)[di] = 0; break; case TL_INT16: val_i16 = ((int16_t *)src)[di]; if (val_i16 >= 0) ((uint32_t *)dst)[di] = (uint32_t)val_i16; else ((uint32_t *)dst)[di] = 0; break; case TL_INT8: val_i8 = ((int8_t *)src)[di]; if (val_i8 >= 0) ((uint32_t *)dst)[di] = (uint32_t)val_i8; else ((uint32_t *)dst)[di] = 0; break; case TL_UINT32: ((uint32_t *)dst)[di] = ((uint32_t *)src)[di]; break; case TL_UINT16: ((uint32_t *)dst)[di] = (uint32_t)((uint16_t *)src)[di]; break; case TL_UINT8: ((uint32_t *)dst)[di] = (uint32_t)((uint8_t *)src)[di]; break; case TL_BOOL: ((uint32_t *)dst)[di] = (uint32_t)((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_UINT16: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d >= UINT16_MAX) ((uint16_t *)dst)[di] = UINT16_MAX; else if (val_d < 0) ((uint16_t *)dst)[di] = 0; else ((uint16_t *)dst)[di] = (uint16_t)val_d; break; case TL_FLOAT: val_f = ((float *)src)[di]; if (val_f >= UINT16_MAX) ((uint16_t *)dst)[di] = UINT16_MAX; else if (val_f < 0) ((uint16_t *)dst)[di] = 0; else ((uint16_t *)dst)[di] = (uint16_t)val_f; break; case TL_INT32: val_i32 = ((int32_t *)src)[di]; if (val_i32 >= UINT16_MAX) ((uint16_t *)dst)[di] = UINT16_MAX; else if (val_i32 < 0) ((uint16_t *)dst)[di] = 0; else ((uint16_t *)dst)[di] = (uint16_t)val_i32; break; case TL_INT16: val_i16 = ((int16_t *)src)[di]; if (val_i16 >= 0) ((uint16_t *)dst)[di] = (uint16_t)val_i16; else ((uint16_t *)dst)[di] = 0; break; case TL_INT8: val_i8 = ((int8_t *)src)[di]; if (val_i8 >= 0) ((uint16_t *)dst)[di] = (uint16_t)val_i8; else ((uint16_t *)dst)[di] = 0; break; case TL_UINT32: val_u32 = ((uint32_t *)src)[di]; if (val_u32 >= UINT16_MAX) ((uint16_t *)dst)[di] = UINT16_MAX; else ((uint16_t *)dst)[di] = (uint16_t)val_u32; break; case TL_UINT16: ((uint16_t *)dst)[di] = ((uint16_t *)src)[di]; break; case TL_UINT8: ((uint16_t *)dst)[di] = (uint16_t)((uint8_t *)src)[di]; break; case TL_BOOL: ((uint16_t *)dst)[di] = (uint16_t)((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_UINT8: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d >= UINT8_MAX) ((uint8_t *)dst)[di] = UINT8_MAX; else if (val_d < 0) ((uint8_t *)dst)[di] = 0; else ((uint8_t *)dst)[di] = (uint8_t)val_d; break; case TL_FLOAT: val_f = ((float *)src)[di]; if (val_f >= UINT8_MAX) ((uint8_t *)dst)[di] = UINT8_MAX; else if (val_f < 0) ((uint8_t *)dst)[di] = 0; else ((uint8_t *)dst)[di] = (uint8_t)val_f; break; case TL_INT32: val_i32 = ((int32_t *)src)[di]; if (val_i32 >= UINT8_MAX) ((uint8_t *)dst)[di] = UINT8_MAX; else if (val_i32 < 0) ((uint8_t *)dst)[di] = 0; else ((uint8_t *)dst)[di] = (uint8_t)val_i32; break; case TL_INT16: val_i16 = ((int16_t *)src)[di]; if (val_i16 >= UINT8_MAX) ((uint8_t *)dst)[di] = UINT8_MAX; else if (val_i16 < 0) ((uint8_t *)dst)[di] = 0; else ((uint8_t *)dst)[di] = 
(uint8_t)val_i16; break; case TL_INT8: val_i8 = ((int8_t *)src)[di]; if (val_i8 >= 0) ((uint8_t *)dst)[di] = (uint8_t)val_i8; else ((uint8_t *)dst)[di] = 0; break; case TL_UINT32: val_u32 = ((uint32_t *)src)[di]; if (val_u32 >= UINT8_MAX) ((uint8_t *)dst)[di] = UINT8_MAX; else ((uint8_t *)dst)[di] = (uint8_t)val_u32; break; case TL_UINT16: val_u16 = ((uint16_t *)src)[di]; if (val_u16 >= UINT8_MAX) ((uint8_t *)dst)[di] = UINT8_MAX; else ((uint8_t *)dst)[di] = (uint8_t)val_u16; break; case TL_UINT8: ((uint8_t *)dst)[di] = ((uint8_t *)src)[di]; break; case TL_BOOL: ((uint8_t *)dst)[di] = (uint8_t)((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; case TL_BOOL: switch (dtype_s) { case TL_DOUBLE: val_d = ((double *)src)[di]; if (val_d > 0 || val_d < 0) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_FLOAT: val_f = ((float *)src)[di]; if (val_f > 0 || val_f < 0) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_INT32: val_i32 = ((int32_t *)src)[di]; if (val_i32) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_INT16: val_i16 = ((int16_t *)src)[di]; if (val_i16) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_INT8: val_i8 = ((int8_t *)src)[di]; if (val_i8) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_UINT32: val_u32 = ((uint32_t *)src)[di]; if (val_u32) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_UINT16: val_u16 = ((uint16_t *)src)[di]; if (val_u16) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_UINT8: val_u8 = ((uint8_t *)src)[di]; if (val_u8) ((tl_bool_t *)dst)[di] = TL_TRUE; else ((tl_bool_t *)dst)[di] = TL_FALSE; break; case TL_BOOL: ((tl_bool_t *)dst)[di] = ((tl_bool_t *)src)[di]; break; default: assert(0 && "unsupported tl_dtype"); break; } break; default: assert(0 && "unsupported tl_dtype"); break; } } TL_EXPORT tl_tensor *tl_tensor_convert_cuda(const tl_tensor *src, tl_tensor *dst, tl_dtype dtype_d) { tl_dtype dtype_s; int thread_num, block_num; assert(src && tl_is_device_mem(src->data)); if (dst) { assert(tl_is_device_mem(dst->data)); assert(tl_tensor_issameshape(src, dst)); assert(dst->dtype == dtype_d); } else { dst = tl_tensor_zeros_cuda(src->ndim, src->dims, dtype_d); } dtype_s = src->dtype; thread_num = dst->len; block_num = BLOCK_NUM(BLOCK_SIZE, thread_num); convert_kernel<<<block_num, BLOCK_SIZE>>>(src->data, dst->data, dtype_s, dtype_d, BLOCK_SIZE, thread_num); tl_cuda_device_sync(); return dst; }
da02af00554fb0fb435e2b79606b8be95e97286b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void imgBlur(float* imgIn, float* imgOut, int imageWidth, int imageHeight)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    if (idx < imageWidth && idy < imageHeight)
    {
        // 3x3 box sum around (idx, idy); each neighbour access is guarded so the
        // +/-1 offsets stay inside the imageWidth x imageHeight buffer.
        float sum = imgIn[idx*imageWidth+idy];
        if (idx > 0 && idy > 0)                        sum += imgIn[(idx-1)*imageWidth+(idy-1)];
        if (idx > 0)                                   sum += imgIn[(idx-1)*imageWidth+idy];
        if (idx < imageWidth-1)                        sum += imgIn[(idx+1)*imageWidth+idy];
        if (idx < imageWidth-1 && idy < imageHeight-1) sum += imgIn[(idx+1)*imageWidth+idy+1];
        if (idx < imageWidth-1 && idy > 0)             sum += imgIn[(idx+1)*imageWidth+idy-1];
        if (idy > 0)                                   sum += imgIn[idx*imageWidth+idy-1];
        if (idy < imageHeight-1)                       sum += imgIn[idx*imageWidth+idy+1];
        if (idx > 0 && idy < imageHeight-1)            sum += imgIn[(idx-1)*imageWidth+idy+1];
        imgOut[idx*imageWidth+idy] = sum / (float)(BLUR_SIZE*BLUR_SIZE);
    }
}
da02af00554fb0fb435e2b79606b8be95e97286b.cu
#include "includes.h"

__global__ void imgBlur(float* imgIn, float* imgOut, int imageWidth, int imageHeight)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    if (idx < imageWidth && idy < imageHeight)
    {
        // 3x3 box sum around (idx, idy); each neighbour access is guarded so the
        // +/-1 offsets stay inside the imageWidth x imageHeight buffer.
        float sum = imgIn[idx*imageWidth+idy];
        if (idx > 0 && idy > 0)                        sum += imgIn[(idx-1)*imageWidth+(idy-1)];
        if (idx > 0)                                   sum += imgIn[(idx-1)*imageWidth+idy];
        if (idx < imageWidth-1)                        sum += imgIn[(idx+1)*imageWidth+idy];
        if (idx < imageWidth-1 && idy < imageHeight-1) sum += imgIn[(idx+1)*imageWidth+idy+1];
        if (idx < imageWidth-1 && idy > 0)             sum += imgIn[(idx+1)*imageWidth+idy-1];
        if (idy > 0)                                   sum += imgIn[idx*imageWidth+idy-1];
        if (idy < imageHeight-1)                       sum += imgIn[idx*imageWidth+idy+1];
        if (idx > 0 && idy < imageHeight-1)            sum += imgIn[(idx-1)*imageWidth+idy+1];
        imgOut[idx*imageWidth+idy] = sum / (float)(BLUR_SIZE*BLUR_SIZE);
    }
}
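The imgBlur pair above ships only the kernel; BLUR_SIZE and the host-side launch live in the unseen "includes.h"/driver code. For context, a minimal illustrative launcher in the CUDA flavour might look like the sketch below; the buffer names d_in/d_out and the 16x16 block shape are assumptions, not part of the original files.

// Illustrative host-side launcher for the imgBlur kernel above (not part of the original pair).
#include <cuda_runtime.h>

void launchImgBlur(float* d_in, float* d_out, int imageWidth, int imageHeight)
{
    // One thread per pixel, rounded up to whole 16x16 blocks.
    dim3 block(16, 16);
    dim3 grid((imageWidth  + block.x - 1) / block.x,
              (imageHeight + block.y - 1) / block.y);
    imgBlur<<<grid, block>>>(d_in, d_out, imageWidth, imageHeight);
    cudaDeviceSynchronize();
}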
cdd60ac6934d5e6dd9752b391b826b7d11b75296.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

extern "C" __global__ void hello_world(float *a, float *b)
{
    int tx = threadIdx.x;
    b[tx] = a[tx];
}
cdd60ac6934d5e6dd9752b391b826b7d11b75296.cu
extern "C" __global__ void hello_world(float *a, float *b)
{
    int tx = threadIdx.x;
    b[tx] = a[tx];
}
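This hello_world pair differs only in the added hip_runtime header, since the file contains no host code for hipify to rewrite. For context, a minimal CUDA host driver for the kernel might look like the sketch below (illustrative only; the buffer names and size are assumptions), whose launch line hipify-perl would rewrite into the hipLaunchKernelGGL form seen in the larger pairs above.

// Illustrative only: a tiny CUDA host driver for hello_world (not in the original pair).
#include <cuda_runtime.h>

int main()
{
    const int n = 256;
    float *d_a, *d_b;
    cudaMalloc((void**)&d_a, n * sizeof(float));
    cudaMalloc((void**)&d_b, n * sizeof(float));
    // hipify-perl rewrites this roughly as:
    //   hipLaunchKernelGGL(hello_world, dim3(1), dim3(n), 0, 0, d_a, d_b);
    hello_world<<<1, n>>>(d_a, d_b);
    cudaDeviceSynchronize();
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}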
1c04690c5d1fe1940d5ce89b39e758624299a884.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define thread_num 16

/*inline void CUDA_ERROR_CHECK(const hipError_t &err){
    if(err != hipSuccess){
        fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}*/

__device__ int mandel(float x, float y, int maxIterations)
{
    float zx, zy, newzx, newzy, a, b;
    zx = x;
    zy = y;
    int i = 0;
    for (i = 0; i < maxIterations; ++i) {
        a = zx*zx;
        b = zy*zy;
        if (a + b > 4.0f)
            break;
        newzx = a - b;
        newzy = 2.f * zx * zy;
        zx = newzx + x;
        zy = newzy + y;
    }
    return i;
}

__global__ void mandelKernel(float lowerX, float lowerY, float stepX, float stepY, int *d_res,
                             int resX, int resY, int maxIterations, size_t x_pixels, size_t y_pixels)
{
    // To avoid error caused by the floating number, use the following pseudo code
    //
    // float x = lowerX + thisX * stepX;
    // float y = lowerY + thisY * stepY;
    int now_x, now_y, i, j, idx;
    now_x = (blockIdx.x * blockDim.x + threadIdx.x)*x_pixels;
    now_y = (blockIdx.y * blockDim.y + threadIdx.y)*y_pixels;
    //now_x = (blockIdx.x * blockDim.x + threadIdx.x);
    //now_y = (blockIdx.y * blockDim.y + threadIdx.y);
    float x, y;
    /*for(i = now_y ; i < resY/thread_num ; i += blockDim.y*y_pixels){
        for(j = now_x ; j < resX/thread_num ; j += blockDim.x*x_pixels){
            x = lowerX + i * resY;
            y = lowerY + j * resY;
            idx = j*resX+i;
            d_res[idx] = mandel(x, y, maxIterations);
        }
    }*/
    if (now_x >= resX || now_y >= resY)
        return;
    for (j = now_y; j < now_y+y_pixels; j++) {
        if (j >= resY)
            return;
        for (i = now_x; i < now_x+x_pixels; i++) {
            if (i >= resX)
                continue;
            x = lowerX + i * stepX;
            y = lowerY + j * stepY;
            idx = j*resX+i;
            d_res[idx] = mandel(x, y, maxIterations);
        }
    }
}

// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE(float upperX, float upperY, float lowerX, float lowerY,
            int* img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;
    int *d_res, *h;
    int x_pixels = 2, y_pixels = 2;
    size_t pitch;
    hipMallocPitch((void**)&d_res, &pitch, resX*sizeof(int), resY);
    int blocksX = (int) ceil(resX/(float)thread_num);
    int blocksY = (int) ceil(resY/(float)thread_num);
    dim3 block(thread_num/x_pixels, thread_num/y_pixels);
    dim3 grid(blocksX, blocksY);
    int size;
    size = resX*resY*sizeof(int);
    hipHostMalloc((void**)&h, size, hipHostMallocMapped);
    hipLaunchKernelGGL(mandelKernel, dim3(grid), dim3(block), 0, 0,
                       lowerX, lowerY, stepX, stepY, d_res, resX, resY, maxIterations, x_pixels, y_pixels);
    hipDeviceSynchronize();
    hipMemcpy(h, d_res, size, hipMemcpyDeviceToHost);
    memcpy(img, h, size);
    hipHostFree(h);
    hipFree(d_res);
}
1c04690c5d1fe1940d5ce89b39e758624299a884.cu
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

#define thread_num 16

/*inline void CUDA_ERROR_CHECK(const cudaError_t &err){
    if(err != cudaSuccess){
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}*/

__device__ int mandel(float x, float y, int maxIterations)
{
    float zx, zy, newzx, newzy, a, b;
    zx = x;
    zy = y;
    int i = 0;
    for (i = 0; i < maxIterations; ++i) {
        a = zx*zx;
        b = zy*zy;
        if (a + b > 4.0f)
            break;
        newzx = a - b;
        newzy = 2.f * zx * zy;
        zx = newzx + x;
        zy = newzy + y;
    }
    return i;
}

__global__ void mandelKernel(float lowerX, float lowerY, float stepX, float stepY, int *d_res,
                             int resX, int resY, int maxIterations, size_t x_pixels, size_t y_pixels)
{
    // To avoid error caused by the floating number, use the following pseudo code
    //
    // float x = lowerX + thisX * stepX;
    // float y = lowerY + thisY * stepY;
    int now_x, now_y, i, j, idx;
    now_x = (blockIdx.x * blockDim.x + threadIdx.x)*x_pixels;
    now_y = (blockIdx.y * blockDim.y + threadIdx.y)*y_pixels;
    //now_x = (blockIdx.x * blockDim.x + threadIdx.x);
    //now_y = (blockIdx.y * blockDim.y + threadIdx.y);
    float x, y;
    /*for(i = now_y ; i < resY/thread_num ; i += blockDim.y*y_pixels){
        for(j = now_x ; j < resX/thread_num ; j += blockDim.x*x_pixels){
            x = lowerX + i * resY;
            y = lowerY + j * resY;
            idx = j*resX+i;
            d_res[idx] = mandel(x, y, maxIterations);
        }
    }*/
    if (now_x >= resX || now_y >= resY)
        return;
    for (j = now_y; j < now_y+y_pixels; j++) {
        if (j >= resY)
            return;
        for (i = now_x; i < now_x+x_pixels; i++) {
            if (i >= resX)
                continue;
            x = lowerX + i * stepX;
            y = lowerY + j * stepY;
            idx = j*resX+i;
            d_res[idx] = mandel(x, y, maxIterations);
        }
    }
}

// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE(float upperX, float upperY, float lowerX, float lowerY,
            int* img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;
    int *d_res, *h;
    int x_pixels = 2, y_pixels = 2;
    size_t pitch;
    cudaMallocPitch((void**)&d_res, &pitch, resX*sizeof(int), resY);
    int blocksX = (int) ceil(resX/(float)thread_num);
    int blocksY = (int) ceil(resY/(float)thread_num);
    dim3 block(thread_num/x_pixels, thread_num/y_pixels);
    dim3 grid(blocksX, blocksY);
    int size;
    size = resX*resY*sizeof(int);
    cudaHostAlloc((void**)&h, size, cudaHostAllocMapped);
    mandelKernel <<< grid, block >>> (lowerX, lowerY, stepX, stepY, d_res, resX, resY,
                                      maxIterations, x_pixels, y_pixels);
    cudaDeviceSynchronize();
    cudaMemcpy(h, d_res, size, cudaMemcpyDeviceToHost);
    memcpy(img, h, size);
    cudaFreeHost(h);
    cudaFree(d_res);
}
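Both versions of this Mandelbrot file keep an error-check helper commented out and call the runtime API unchecked. A sketch of such a checker in the CUDA flavour, mirroring the commented-out block (illustrative; not part of the original pair):

// Illustrative sketch of the error-check helper the pair leaves commented out.
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

inline void CUDA_ERROR_CHECK(const cudaError_t &err)
{
    if (err != cudaSuccess) {
        // Print a readable error string and abort, as the commented-out helper intends.
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Usage, e.g.: CUDA_ERROR_CHECK(cudaMemcpy(h, d_res, size, cudaMemcpyDeviceToHost));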
43fa47b8741cc59d592103c121a36c160ec80920.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" #include "opencv2/core/cuda/limits.hpp" #include "opencv2/core/cuda/reduce.hpp" #include "opencv2/core/cuda/functional.hpp" #include "stereocsbp.hpp" namespace cv { namespace cuda { namespace device { namespace stereocsbp { /////////////////////////////////////////////////////////////// /////////////////////// init data cost //////////////////////// /////////////////////////////////////////////////////////////// template <int channels> static float __device__ pixeldiff(const uchar* left, const uchar* right, float max_data_term); template<> __device__ __forceinline__ float pixeldiff<1>(const uchar* left, const uchar* right, float max_data_term) { return fminf( ::abs((int)*left - *right), max_data_term); } template<> __device__ __forceinline__ float pixeldiff<3>(const uchar* left, const uchar* right, float max_data_term) { float tb = 0.114f * ::abs((int)left[0] - right[0]); float tg = 0.587f * ::abs((int)left[1] - right[1]); float tr = 0.299f * ::abs((int)left[2] - right[2]); return fminf(tr + tg + tb, max_data_term); } template<> __device__ __forceinline__ float pixeldiff<4>(const uchar* left, const uchar* right, float max_data_term) { uchar4 l = *((const uchar4*)left); uchar4 r = *((const uchar4*)right); float tb = 0.114f * ::abs((int)l.x - r.x); float tg = 0.587f * ::abs((int)l.y - r.y); float tr = 0.299f * ::abs((int)l.z - r.z); return fminf(tr + tg + tb, max_data_term); } template <typename T> __global__ void get_first_k_initial_global(uchar *ctemp, T* data_cost_selected_, T *selected_disp_pyr, int h, int w, int nr_plane, int ndisp, size_t msg_step, size_t disp_step) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { T* selected_disparity = selected_disp_pyr + y * msg_step + x; T* data_cost_selected = data_cost_selected_ + y * msg_step + x; T* data_cost = (T*)ctemp + y * msg_step + x; for(int i = 0; i < nr_plane; i++) { T minimum = device::numeric_limits<T>::max(); int id = 0; for(int d = 0; d < ndisp; d++) { T cur = data_cost[d * disp_step]; if(cur < minimum) { minimum = cur; id = d; } } data_cost_selected[i * disp_step] = minimum; selected_disparity[i * disp_step] = id; data_cost [id * disp_step] = numeric_limits<T>::max(); } } } template <typename T> __global__ void get_first_k_initial_local(uchar *ctemp, T* data_cost_selected_, T* selected_disp_pyr, int h, int w, int nr_plane, int ndisp, size_t msg_step, size_t disp_step) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { T* selected_disparity = selected_disp_pyr + y * msg_step + x; T* data_cost_selected = data_cost_selected_ + y * msg_step + x; T* data_cost = (T*)ctemp + y * msg_step + x; int nr_local_minimum = 0; T prev = data_cost[0 * disp_step]; T cur = data_cost[1 * disp_step]; T next = data_cost[2 * disp_step]; for (int d = 1; d < ndisp - 1 && nr_local_minimum < nr_plane; d++) { if (cur < prev && cur < next) { data_cost_selected[nr_local_minimum * disp_step] = cur; selected_disparity[nr_local_minimum * disp_step] = d; data_cost[d * disp_step] = numeric_limits<T>::max(); nr_local_minimum++; } prev = cur; cur = next; next = data_cost[(d + 1) * disp_step]; } for (int i = nr_local_minimum; i < nr_plane; i++) { T minimum = numeric_limits<T>::max(); int id = 0; for (int d = 0; d < ndisp; d++) { cur = data_cost[d * disp_step]; if (cur < minimum) { minimum = cur; id = d; } } 
data_cost_selected[i * disp_step] = minimum; selected_disparity[i * disp_step] = id; data_cost[id * disp_step] = numeric_limits<T>::max(); } } } template <typename T, int channels> __global__ void init_data_cost(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int h, int w, int level, int ndisp, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { int y0 = y << level; int yt = (y + 1) << level; int x0 = x << level; int xt = (x + 1) << level; T* data_cost = (T*)ctemp + y * msg_step + x; for(int d = 0; d < ndisp; ++d) { float val = 0.0f; for(int yi = y0; yi < yt; yi++) { for(int xi = x0; xi < xt; xi++) { int xr = xi - d; if(d < min_disp || xr < 0) val += data_weight * max_data_term; else { const uchar* lle = cleft + yi * cimg_step + xi * channels; const uchar* lri = cright + yi * cimg_step + xr * channels; val += data_weight * pixeldiff<channels>(lle, lri, max_data_term); } } } data_cost[disp_step * d] = saturate_cast<T>(val); } } } template <typename T, int winsz, int channels> __global__ void init_data_cost_reduce(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int level, int rows, int cols, int h, int ndisp, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step) { int x_out = blockIdx.x; int y_out = blockIdx.y % h; int d = (blockIdx.y / h) * blockDim.z + threadIdx.z; int tid = threadIdx.x; if (d < ndisp) { int x0 = x_out << level; int y0 = y_out << level; int len = ::min(y0 + winsz, rows) - y0; float val = 0.0f; if (x0 + tid < cols) { if (x0 + tid - d < 0 || d < min_disp) val = data_weight * max_data_term * len; else { const uchar* lle = cleft + y0 * cimg_step + channels * (x0 + tid ); const uchar* lri = cright + y0 * cimg_step + channels * (x0 + tid - d); for(int y = 0; y < len; ++y) { val += data_weight * pixeldiff<channels>(lle, lri, max_data_term); lle += cimg_step; lri += cimg_step; } } } extern __shared__ float smem[]; reduce<winsz>(smem + winsz * threadIdx.z, val, tid, plus<float>()); T* data_cost = (T*)ctemp + y_out * msg_step + x_out; if (tid == 0) data_cost[disp_step * d] = saturate_cast<T>(val); } } template <typename T> void init_data_cost_caller_(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int /*rows*/, int /*cols*/, int h, int w, int level, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step, hipStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); switch (channels) { case 1:hipLaunchKernelGGL(( init_data_cost<T, 1>), dim3(grid), dim3(threads), 0, stream, cleft, cright, ctemp, cimg_step, h, w, level, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; case 3:hipLaunchKernelGGL(( init_data_cost<T, 3>), dim3(grid), dim3(threads), 0, stream, cleft, cright, ctemp, cimg_step, h, w, level, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; case 4:hipLaunchKernelGGL(( init_data_cost<T, 4>), dim3(grid), dim3(threads), 0, stream, cleft, cright, ctemp, cimg_step, h, w, level, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count"); } } template <typename T, int winsz> void init_data_cost_reduce_caller_(const uchar *cleft, const uchar *cright, uchar *ctemp, 
size_t cimg_step, int rows, int cols, int h, int w, int level, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step, hipStream_t stream) { const int threadsNum = 256; const size_t smem_size = threadsNum * sizeof(float); dim3 threads(winsz, 1, threadsNum / winsz); dim3 grid(w, h, 1); grid.y *= divUp(ndisp, threads.z); switch (channels) { case 1:hipLaunchKernelGGL(( init_data_cost_reduce<T, winsz, 1>), dim3(grid), dim3(threads), smem_size, stream, cleft, cright, ctemp, cimg_step, level, rows, cols, h, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; case 3:hipLaunchKernelGGL(( init_data_cost_reduce<T, winsz, 3>), dim3(grid), dim3(threads), smem_size, stream, cleft, cright, ctemp, cimg_step, level, rows, cols, h, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; case 4:hipLaunchKernelGGL(( init_data_cost_reduce<T, winsz, 4>), dim3(grid), dim3(threads), smem_size, stream, cleft, cright, ctemp, cimg_step, level, rows, cols, h, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count"); } } template<class T> void init_data_cost(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, T* disp_selected_pyr, T* data_cost_selected, size_t msg_step, int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, bool use_local_init_data_cost, hipStream_t stream) { typedef void (*InitDataCostCaller)(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int cols, int rows, int w, int h, int level, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step, hipStream_t stream); static const InitDataCostCaller init_data_cost_callers[] = { init_data_cost_caller_<T>, init_data_cost_caller_<T>, init_data_cost_reduce_caller_<T, 4>, init_data_cost_reduce_caller_<T, 8>, init_data_cost_reduce_caller_<T, 16>, init_data_cost_reduce_caller_<T, 32>, init_data_cost_reduce_caller_<T, 64>, init_data_cost_reduce_caller_<T, 128>, init_data_cost_reduce_caller_<T, 256> }; size_t disp_step = msg_step * h; init_data_cost_callers[level](cleft, cright, ctemp, cimg_step, rows, cols, h, w, level, ndisp, channels, data_weight, max_data_term, min_disp, msg_step, disp_step, stream); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); if (use_local_init_data_cost == true) hipLaunchKernelGGL(( get_first_k_initial_local), dim3(grid), dim3(threads), 0, stream, ctemp, data_cost_selected, disp_selected_pyr, h, w, nr_plane, ndisp, msg_step, disp_step); else hipLaunchKernelGGL(( get_first_k_initial_global), dim3(grid), dim3(threads), 0, stream, ctemp, data_cost_selected, disp_selected_pyr, h, w, nr_plane, ndisp, msg_step, disp_step); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void init_data_cost<short>(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, short* disp_selected_pyr, short* data_cost_selected, size_t msg_step, int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, bool use_local_init_data_cost, hipStream_t stream); template void init_data_cost<float>(const uchar *cleft, 
const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, float* disp_selected_pyr, float* data_cost_selected, size_t msg_step, int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, bool use_local_init_data_cost, hipStream_t stream); /////////////////////////////////////////////////////////////// ////////////////////// compute data cost ////////////////////// /////////////////////////////////////////////////////////////// template <typename T, int channels> __global__ void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* selected_disp_pyr, T* data_cost_, int h, int w, int level, int nr_plane, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { int y0 = y << level; int yt = (y + 1) << level; int x0 = x << level; int xt = (x + 1) << level; const T* selected_disparity = selected_disp_pyr + y/2 * msg_step + x/2; T* data_cost = data_cost_ + y * msg_step + x; for(int d = 0; d < nr_plane; d++) { float val = 0.0f; for(int yi = y0; yi < yt; yi++) { for(int xi = x0; xi < xt; xi++) { int sel_disp = selected_disparity[d * disp_step2]; int xr = xi - sel_disp; if (xr < 0 || sel_disp < min_disp) val += data_weight * max_data_term; else { const uchar* left_x = cleft + yi * cimg_step + xi * channels; const uchar* right_x = cright + yi * cimg_step + xr * channels; val += data_weight * pixeldiff<channels>(left_x, right_x, max_data_term); } } } data_cost[disp_step1 * d] = saturate_cast<T>(val); } } } template <typename T, int winsz, int channels> __global__ void compute_data_cost_reduce(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* selected_disp_pyr, T* data_cost_, int level, int rows, int cols, int h, int nr_plane, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2) { int x_out = blockIdx.x; int y_out = blockIdx.y % h; int d = (blockIdx.y / h) * blockDim.z + threadIdx.z; int tid = threadIdx.x; const T* selected_disparity = selected_disp_pyr + y_out/2 * msg_step + x_out/2; T* data_cost = data_cost_ + y_out * msg_step + x_out; if (d < nr_plane) { int sel_disp = selected_disparity[d * disp_step2]; int x0 = x_out << level; int y0 = y_out << level; int len = ::min(y0 + winsz, rows) - y0; float val = 0.0f; if (x0 + tid < cols) { if (x0 + tid - sel_disp < 0 || sel_disp < min_disp) val = data_weight * max_data_term * len; else { const uchar* lle = cleft + y0 * cimg_step + channels * (x0 + tid ); const uchar* lri = cright + y0 * cimg_step + channels * (x0 + tid - sel_disp); for(int y = 0; y < len; ++y) { val += data_weight * pixeldiff<channels>(lle, lri, max_data_term); lle += cimg_step; lri += cimg_step; } } } extern __shared__ float smem[]; reduce<winsz>(smem + winsz * threadIdx.z, val, tid, plus<float>()); if (tid == 0) data_cost[disp_step1 * d] = saturate_cast<T>(val); } } template <typename T> void compute_data_cost_caller_(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, int /*rows*/, int /*cols*/, int h, int w, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2, hipStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); switch(channels) 
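// dispatch on image channel count: 1 (grayscale), 3 (BGR) and 4 (BGRA) are the only supported layouts; anything else raises BadNumChannels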
{ case 1:hipLaunchKernelGGL(( compute_data_cost<T, 1>), dim3(grid), dim3(threads), 0, stream, cleft, cright, cimg_step, disp_selected_pyr, data_cost, h, w, level, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; case 3:hipLaunchKernelGGL(( compute_data_cost<T, 3>), dim3(grid), dim3(threads), 0, stream, cleft, cright, cimg_step, disp_selected_pyr, data_cost, h, w, level, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; case 4:hipLaunchKernelGGL(( compute_data_cost<T, 4>), dim3(grid), dim3(threads), 0, stream, cleft, cright, cimg_step, disp_selected_pyr, data_cost, h, w, level, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count"); } } template <typename T, int winsz> void compute_data_cost_reduce_caller_(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, int rows, int cols, int h, int w, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2, hipStream_t stream) { const int threadsNum = 256; const size_t smem_size = threadsNum * sizeof(float); dim3 threads(winsz, 1, threadsNum / winsz); dim3 grid(w, h, 1); grid.y *= divUp(nr_plane, threads.z); switch (channels) { case 1:hipLaunchKernelGGL(( compute_data_cost_reduce<T, winsz, 1>), dim3(grid), dim3(threads), smem_size, stream, cleft, cright, cimg_step, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; case 3:hipLaunchKernelGGL(( compute_data_cost_reduce<T, winsz, 3>), dim3(grid), dim3(threads), smem_size, stream, cleft, cright, cimg_step, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; case 4:hipLaunchKernelGGL(( compute_data_cost_reduce<T, winsz, 4>), dim3(grid), dim3(threads), smem_size, stream, cleft, cright, cimg_step, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count"); } } template<class T> void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, size_t msg_step, int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, hipStream_t stream) { typedef void (*ComputeDataCostCaller)(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, int rows, int cols, int h, int w, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2, hipStream_t stream); static const ComputeDataCostCaller callers[] = { compute_data_cost_caller_<T>, compute_data_cost_caller_<T>, compute_data_cost_reduce_caller_<T, 4>, compute_data_cost_reduce_caller_<T, 8>, compute_data_cost_reduce_caller_<T, 16>, compute_data_cost_reduce_caller_<T, 32>, compute_data_cost_reduce_caller_<T, 64>, compute_data_cost_reduce_caller_<T, 128>, compute_data_cost_reduce_caller_<T, 256> }; size_t disp_step1 = msg_step * h; size_t disp_step2 = msg_step * h2; callers[level](cleft, cright, cimg_step, disp_selected_pyr, data_cost, rows, cols, h, w, 
level, nr_plane, channels, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2, stream); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const short* disp_selected_pyr, short* data_cost, size_t msg_step, int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, hipStream_t stream); template void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const float* disp_selected_pyr, float* data_cost, size_t msg_step, int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, hipStream_t stream); /////////////////////////////////////////////////////////////// //////////////////////// init message ///////////////////////// /////////////////////////////////////////////////////////////// template <typename T> __device__ void get_first_k_element_increase(T* u_new, T* d_new, T* l_new, T* r_new, const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur, T* data_cost_selected, T* disparity_selected_new, T* data_cost_new, const T* data_cost_cur, const T* disparity_selected_cur, int nr_plane, int nr_plane2, size_t disp_step1, size_t disp_step2) { for(int i = 0; i < nr_plane; i++) { T minimum = numeric_limits<T>::max(); int id = 0; for(int j = 0; j < nr_plane2; j++) { T cur = data_cost_new[j * disp_step1]; if(cur < minimum) { minimum = cur; id = j; } } data_cost_selected[i * disp_step1] = data_cost_cur[id * disp_step1]; disparity_selected_new[i * disp_step1] = disparity_selected_cur[id * disp_step2]; u_new[i * disp_step1] = u_cur[id * disp_step2]; d_new[i * disp_step1] = d_cur[id * disp_step2]; l_new[i * disp_step1] = l_cur[id * disp_step2]; r_new[i * disp_step1] = r_cur[id * disp_step2]; data_cost_new[id * disp_step1] = numeric_limits<T>::max(); } } template <typename T> __global__ void init_message(uchar *ctemp, T* u_new_, T* d_new_, T* l_new_, T* r_new_, const T* u_cur_, const T* d_cur_, const T* l_cur_, const T* r_cur_, T* selected_disp_pyr_new, const T* selected_disp_pyr_cur, T* data_cost_selected_, const T* data_cost_, int h, int w, int nr_plane, int h2, int w2, int nr_plane2, size_t msg_step, size_t disp_step1, size_t disp_step2) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { const T* u_cur = u_cur_ + ::min(h2-1, y/2 + 1) * msg_step + x/2; const T* d_cur = d_cur_ + ::max(0, y/2 - 1) * msg_step + x/2; const T* l_cur = l_cur_ + (y/2) * msg_step + ::min(w2-1, x/2 + 1); const T* r_cur = r_cur_ + (y/2) * msg_step + ::max(0, x/2 - 1); T* data_cost_new = (T*)ctemp + y * msg_step + x; const T* disparity_selected_cur = selected_disp_pyr_cur + y/2 * msg_step + x/2; const T* data_cost = data_cost_ + y * msg_step + x; for(int d = 0; d < nr_plane2; d++) { int idx2 = d * disp_step2; T val = data_cost[d * disp_step1] + u_cur[idx2] + d_cur[idx2] + l_cur[idx2] + r_cur[idx2]; data_cost_new[d * disp_step1] = val; } T* data_cost_selected = data_cost_selected_ + y * msg_step + x; T* disparity_selected_new = selected_disp_pyr_new + y * msg_step + x; T* u_new = u_new_ + y * msg_step + x; T* d_new = d_new_ + y * msg_step + x; T* l_new = l_new_ + y * msg_step + x; T* r_new = r_new_ + y * msg_step + x; u_cur = u_cur_ + y/2 * msg_step + x/2; d_cur = d_cur_ + y/2 * msg_step + x/2; l_cur = l_cur_ + y/2 * msg_step + x/2; 
r_cur = r_cur_ + y/2 * msg_step + x/2; get_first_k_element_increase(u_new, d_new, l_new, r_new, u_cur, d_cur, l_cur, r_cur, data_cost_selected, disparity_selected_new, data_cost_new, data_cost, disparity_selected_cur, nr_plane, nr_plane2, disp_step1, disp_step2); } } template<class T> void init_message(uchar *ctemp, T* u_new, T* d_new, T* l_new, T* r_new, const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur, T* selected_disp_pyr_new, const T* selected_disp_pyr_cur, T* data_cost_selected, const T* data_cost, size_t msg_step, int h, int w, int nr_plane, int h2, int w2, int nr_plane2, hipStream_t stream) { size_t disp_step1 = msg_step * h; size_t disp_step2 = msg_step * h2; dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); hipLaunchKernelGGL(( init_message), dim3(grid), dim3(threads), 0, stream, ctemp, u_new, d_new, l_new, r_new, u_cur, d_cur, l_cur, r_cur, selected_disp_pyr_new, selected_disp_pyr_cur, data_cost_selected, data_cost, h, w, nr_plane, h2, w2, nr_plane2, msg_step, disp_step1, disp_step2); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void init_message(uchar *ctemp, short* u_new, short* d_new, short* l_new, short* r_new, const short* u_cur, const short* d_cur, const short* l_cur, const short* r_cur, short* selected_disp_pyr_new, const short* selected_disp_pyr_cur, short* data_cost_selected, const short* data_cost, size_t msg_step, int h, int w, int nr_plane, int h2, int w2, int nr_plane2, hipStream_t stream); template void init_message(uchar *ctemp, float* u_new, float* d_new, float* l_new, float* r_new, const float* u_cur, const float* d_cur, const float* l_cur, const float* r_cur, float* selected_disp_pyr_new, const float* selected_disp_pyr_cur, float* data_cost_selected, const float* data_cost, size_t msg_step, int h, int w, int nr_plane, int h2, int w2, int nr_plane2, hipStream_t stream); /////////////////////////////////////////////////////////////// //////////////////// calc all iterations ///////////////////// /////////////////////////////////////////////////////////////// template <typename T> __device__ void message_per_pixel(const T* data, T* msg_dst, const T* msg1, const T* msg2, const T* msg3, const T* dst_disp, const T* src_disp, int nr_plane, int max_disc_term, float disc_single_jump, volatile T* temp, size_t disp_step) { T minimum = numeric_limits<T>::max(); for(int d = 0; d < nr_plane; d++) { int idx = d * disp_step; T val = data[idx] + msg1[idx] + msg2[idx] + msg3[idx]; if(val < minimum) minimum = val; msg_dst[idx] = val; } float sum = 0; for(int d = 0; d < nr_plane; d++) { float cost_min = minimum + max_disc_term; T src_disp_reg = src_disp[d * disp_step]; for(int d2 = 0; d2 < nr_plane; d2++) cost_min = fmin(cost_min, msg_dst[d2 * disp_step] + disc_single_jump * ::abs(dst_disp[d2 * disp_step] - src_disp_reg)); temp[d * disp_step] = saturate_cast<T>(cost_min); sum += cost_min; } sum /= nr_plane; for(int d = 0; d < nr_plane; d++) msg_dst[d * disp_step] = saturate_cast<T>(temp[d * disp_step] - sum); } template <typename T> __global__ void compute_message(uchar *ctemp, T* u_, T* d_, T* l_, T* r_, const T* data_cost_selected, const T* selected_disp_pyr_cur, int h, int w, int nr_plane, int i, int max_disc_term, float disc_single_jump, size_t msg_step, size_t disp_step) { int y = blockIdx.y * blockDim.y + threadIdx.y; int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + i) & 1); if (y > 0 && y < h - 1 && x > 0 && x < w - 1) { const T* data = 
data_cost_selected + y * msg_step + x; T* u = u_ + y * msg_step + x; T* d = d_ + y * msg_step + x; T* l = l_ + y * msg_step + x; T* r = r_ + y * msg_step + x; const T* disp = selected_disp_pyr_cur + y * msg_step + x; T* temp = (T*)ctemp + y * msg_step + x; message_per_pixel(data, u, r - 1, u + msg_step, l + 1, disp, disp - msg_step, nr_plane, max_disc_term, disc_single_jump, temp, disp_step); message_per_pixel(data, d, d - msg_step, r - 1, l + 1, disp, disp + msg_step, nr_plane, max_disc_term, disc_single_jump, temp, disp_step); message_per_pixel(data, l, u + msg_step, d - msg_step, l + 1, disp, disp - 1, nr_plane, max_disc_term, disc_single_jump, temp, disp_step); message_per_pixel(data, r, u + msg_step, d - msg_step, r - 1, disp, disp + 1, nr_plane, max_disc_term, disc_single_jump, temp, disp_step); } } template<class T> void calc_all_iterations(uchar *ctemp, T* u, T* d, T* l, T* r, const T* data_cost_selected, const T* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, hipStream_t stream) { size_t disp_step = msg_step * h; dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x << 1); grid.y = divUp(h, threads.y); for(int t = 0; t < iters; ++t) { hipLaunchKernelGGL(( compute_message), dim3(grid), dim3(threads), 0, stream, ctemp, u, d, l, r, data_cost_selected, selected_disp_pyr_cur, h, w, nr_plane, t & 1, max_disc_term, disc_single_jump, msg_step, disp_step); cudaSafeCall( hipGetLastError() ); } if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); }; template void calc_all_iterations(uchar *ctemp, short* u, short* d, short* l, short* r, const short* data_cost_selected, const short* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, hipStream_t stream); template void calc_all_iterations(uchar *ctemp, float* u, float* d, float* l, float* r, const float* data_cost_selected, const float* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, hipStream_t stream); /////////////////////////////////////////////////////////////// /////////////////////////// output //////////////////////////// /////////////////////////////////////////////////////////////// template <typename T> __global__ void compute_disp(const T* u_, const T* d_, const T* l_, const T* r_, const T* data_cost_selected, const T* disp_selected_pyr, PtrStepSz<short> disp, int nr_plane, size_t msg_step, size_t disp_step) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y > 0 && y < disp.rows - 1 && x > 0 && x < disp.cols - 1) { const T* data = data_cost_selected + y * msg_step + x; const T* disp_selected = disp_selected_pyr + y * msg_step + x; const T* u = u_ + (y+1) * msg_step + (x+0); const T* d = d_ + (y-1) * msg_step + (x+0); const T* l = l_ + (y+0) * msg_step + (x+1); const T* r = r_ + (y+0) * msg_step + (x-1); int best = 0; T best_val = numeric_limits<T>::max(); for (int i = 0; i < nr_plane; ++i) { int idx = i * disp_step; T val = data[idx]+ u[idx] + d[idx] + l[idx] + r[idx]; if (val < best_val) { best_val = val; best = saturate_cast<short>(disp_selected[idx]); } } disp(y, x) = best; } } template<class T> void compute_disp(const T* u, const T* d, const T* l, const T* r, const T* data_cost_selected, const T* disp_selected, size_t msg_step, const PtrStepSz<short>& disp, int nr_plane, hipStream_t stream) { size_t disp_step = disp.rows * msg_step; dim3 
threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(disp.cols, threads.x); grid.y = divUp(disp.rows, threads.y); hipLaunchKernelGGL(( compute_disp), dim3(grid), dim3(threads), 0, stream, u, d, l, r, data_cost_selected, disp_selected, disp, nr_plane, msg_step, disp_step); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void compute_disp(const short* u, const short* d, const short* l, const short* r, const short* data_cost_selected, const short* disp_selected, size_t msg_step, const PtrStepSz<short>& disp, int nr_plane, hipStream_t stream); template void compute_disp(const float* u, const float* d, const float* l, const float* r, const float* data_cost_selected, const float* disp_selected, size_t msg_step, const PtrStepSz<short>& disp, int nr_plane, hipStream_t stream); } // namespace stereocsbp }}} // namespace cv { namespace cuda { namespace cudev { #endif /* CUDA_DISABLER */
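// --- Illustrative note (not part of either original file) ----------------------------------
// The HIP source that ends here and the CUDA source that follows contain the same OpenCV
// stereocsbp kernels; the differences are confined to the kernel-launch syntax plus a
// mechanical cuda* -> hip* renaming of runtime calls and types. A minimal sketch of that
// mapping is shown below using a hypothetical kernel `axpy_kernel` (not taken from the files).

#include <hip/hip_runtime.h>

__global__ void axpy_kernel(float* y, const float* x, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        y[i] += a * x[i];
}

void launch_axpy(float* d_y, const float* d_x, float a, int n, hipStream_t stream)
{
    dim3 threads(256, 1, 1);
    dim3 grid((n + threads.x - 1) / threads.x, 1, 1);

    // CUDA form, as written in the .cu file that follows:
    //     axpy_kernel<<<grid, threads, 0, stream>>>(d_y, d_x, a, n);
    // HIP form emitted by hipify, as used in the file that ends above:
    hipLaunchKernelGGL((axpy_kernel), dim3(grid), dim3(threads), 0, stream, d_y, d_x, a, n);

    // The remaining runtime calls map one-to-one, e.g.:
    //     cudaGetLastError()      -> hipGetLastError()
    //     cudaDeviceSynchronize() -> hipDeviceSynchronize()
    //     cudaStream_t            -> hipStream_t
}
// --------------------------------------------------------------------------------------------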
43fa47b8741cc59d592103c121a36c160ec80920.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" #include "opencv2/core/cuda/limits.hpp" #include "opencv2/core/cuda/reduce.hpp" #include "opencv2/core/cuda/functional.hpp" #include "stereocsbp.hpp" namespace cv { namespace cuda { namespace device { namespace stereocsbp { /////////////////////////////////////////////////////////////// /////////////////////// init data cost //////////////////////// /////////////////////////////////////////////////////////////// template <int channels> static float __device__ pixeldiff(const uchar* left, const uchar* right, float max_data_term); template<> __device__ __forceinline__ float pixeldiff<1>(const uchar* left, const uchar* right, float max_data_term) { return fminf( ::abs((int)*left - *right), max_data_term); } template<> __device__ __forceinline__ float pixeldiff<3>(const uchar* left, const uchar* right, float max_data_term) { float tb = 0.114f * ::abs((int)left[0] - right[0]); float tg = 0.587f * ::abs((int)left[1] - right[1]); float tr = 0.299f * ::abs((int)left[2] - right[2]); return fminf(tr + tg + tb, max_data_term); } template<> __device__ __forceinline__ float pixeldiff<4>(const uchar* left, const uchar* right, float max_data_term) { uchar4 l = *((const uchar4*)left); uchar4 r = *((const uchar4*)right); float tb = 0.114f * ::abs((int)l.x - r.x); float tg = 0.587f * ::abs((int)l.y - r.y); float tr = 0.299f * ::abs((int)l.z - r.z); return fminf(tr + tg + tb, max_data_term); } template <typename T> __global__ void get_first_k_initial_global(uchar *ctemp, T* data_cost_selected_, T *selected_disp_pyr, int h, int w, int nr_plane, int ndisp, size_t msg_step, size_t disp_step) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { T* selected_disparity = selected_disp_pyr + y * msg_step + x; T* data_cost_selected = data_cost_selected_ + y * msg_step + x; T* data_cost = (T*)ctemp + y * msg_step + x; for(int i = 0; i < nr_plane; i++) { T minimum = device::numeric_limits<T>::max(); int id = 0; for(int d = 0; d < ndisp; d++) { T cur = data_cost[d * disp_step]; if(cur < minimum) { minimum = cur; id = d; } } data_cost_selected[i * disp_step] = minimum; selected_disparity[i * disp_step] = id; data_cost [id * disp_step] = numeric_limits<T>::max(); } } } template <typename T> __global__ void get_first_k_initial_local(uchar *ctemp, T* data_cost_selected_, T* selected_disp_pyr, int h, int w, int nr_plane, int ndisp, size_t msg_step, size_t disp_step) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { T* selected_disparity = selected_disp_pyr + y * msg_step + x; T* data_cost_selected = data_cost_selected_ + y * msg_step + x; T* data_cost = (T*)ctemp + y * msg_step + x; int nr_local_minimum = 0; T prev = data_cost[0 * disp_step]; T cur = data_cost[1 * disp_step]; T next = data_cost[2 * disp_step]; for (int d = 1; d < ndisp - 1 && nr_local_minimum < nr_plane; d++) { if (cur < prev && cur < next) { data_cost_selected[nr_local_minimum * disp_step] = cur; selected_disparity[nr_local_minimum * disp_step] = d; data_cost[d * disp_step] = numeric_limits<T>::max(); nr_local_minimum++; } prev = cur; cur = next; next = data_cost[(d + 1) * disp_step]; } for (int i = nr_local_minimum; i < nr_plane; i++) { T minimum = numeric_limits<T>::max(); int id = 0; for (int d = 0; d < ndisp; d++) { cur = data_cost[d * disp_step]; if (cur < minimum) { minimum = cur; id = d; } } 
data_cost_selected[i * disp_step] = minimum; selected_disparity[i * disp_step] = id; data_cost[id * disp_step] = numeric_limits<T>::max(); } } } template <typename T, int channels> __global__ void init_data_cost(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int h, int w, int level, int ndisp, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { int y0 = y << level; int yt = (y + 1) << level; int x0 = x << level; int xt = (x + 1) << level; T* data_cost = (T*)ctemp + y * msg_step + x; for(int d = 0; d < ndisp; ++d) { float val = 0.0f; for(int yi = y0; yi < yt; yi++) { for(int xi = x0; xi < xt; xi++) { int xr = xi - d; if(d < min_disp || xr < 0) val += data_weight * max_data_term; else { const uchar* lle = cleft + yi * cimg_step + xi * channels; const uchar* lri = cright + yi * cimg_step + xr * channels; val += data_weight * pixeldiff<channels>(lle, lri, max_data_term); } } } data_cost[disp_step * d] = saturate_cast<T>(val); } } } template <typename T, int winsz, int channels> __global__ void init_data_cost_reduce(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int level, int rows, int cols, int h, int ndisp, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step) { int x_out = blockIdx.x; int y_out = blockIdx.y % h; int d = (blockIdx.y / h) * blockDim.z + threadIdx.z; int tid = threadIdx.x; if (d < ndisp) { int x0 = x_out << level; int y0 = y_out << level; int len = ::min(y0 + winsz, rows) - y0; float val = 0.0f; if (x0 + tid < cols) { if (x0 + tid - d < 0 || d < min_disp) val = data_weight * max_data_term * len; else { const uchar* lle = cleft + y0 * cimg_step + channels * (x0 + tid ); const uchar* lri = cright + y0 * cimg_step + channels * (x0 + tid - d); for(int y = 0; y < len; ++y) { val += data_weight * pixeldiff<channels>(lle, lri, max_data_term); lle += cimg_step; lri += cimg_step; } } } extern __shared__ float smem[]; reduce<winsz>(smem + winsz * threadIdx.z, val, tid, plus<float>()); T* data_cost = (T*)ctemp + y_out * msg_step + x_out; if (tid == 0) data_cost[disp_step * d] = saturate_cast<T>(val); } } template <typename T> void init_data_cost_caller_(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int /*rows*/, int /*cols*/, int h, int w, int level, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); switch (channels) { case 1: init_data_cost<T, 1><<<grid, threads, 0, stream>>>(cleft, cright, ctemp, cimg_step, h, w, level, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; case 3: init_data_cost<T, 3><<<grid, threads, 0, stream>>>(cleft, cright, ctemp, cimg_step, h, w, level, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; case 4: init_data_cost<T, 4><<<grid, threads, 0, stream>>>(cleft, cright, ctemp, cimg_step, h, w, level, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count"); } } template <typename T, int winsz> void init_data_cost_reduce_caller_(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, int h, int w, int level, int ndisp, int channels, float 
data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step, cudaStream_t stream) { const int threadsNum = 256; const size_t smem_size = threadsNum * sizeof(float); dim3 threads(winsz, 1, threadsNum / winsz); dim3 grid(w, h, 1); grid.y *= divUp(ndisp, threads.z); switch (channels) { case 1: init_data_cost_reduce<T, winsz, 1><<<grid, threads, smem_size, stream>>>(cleft, cright, ctemp, cimg_step, level, rows, cols, h, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; case 3: init_data_cost_reduce<T, winsz, 3><<<grid, threads, smem_size, stream>>>(cleft, cright, ctemp, cimg_step, level, rows, cols, h, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; case 4: init_data_cost_reduce<T, winsz, 4><<<grid, threads, smem_size, stream>>>(cleft, cright, ctemp, cimg_step, level, rows, cols, h, ndisp, data_weight, max_data_term, min_disp, msg_step, disp_step); break; default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count"); } } template<class T> void init_data_cost(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, T* disp_selected_pyr, T* data_cost_selected, size_t msg_step, int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, bool use_local_init_data_cost, cudaStream_t stream) { typedef void (*InitDataCostCaller)(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int cols, int rows, int w, int h, int level, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step, cudaStream_t stream); static const InitDataCostCaller init_data_cost_callers[] = { init_data_cost_caller_<T>, init_data_cost_caller_<T>, init_data_cost_reduce_caller_<T, 4>, init_data_cost_reduce_caller_<T, 8>, init_data_cost_reduce_caller_<T, 16>, init_data_cost_reduce_caller_<T, 32>, init_data_cost_reduce_caller_<T, 64>, init_data_cost_reduce_caller_<T, 128>, init_data_cost_reduce_caller_<T, 256> }; size_t disp_step = msg_step * h; init_data_cost_callers[level](cleft, cright, ctemp, cimg_step, rows, cols, h, w, level, ndisp, channels, data_weight, max_data_term, min_disp, msg_step, disp_step, stream); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); if (use_local_init_data_cost == true) get_first_k_initial_local<<<grid, threads, 0, stream>>> (ctemp, data_cost_selected, disp_selected_pyr, h, w, nr_plane, ndisp, msg_step, disp_step); else get_first_k_initial_global<<<grid, threads, 0, stream>>>(ctemp, data_cost_selected, disp_selected_pyr, h, w, nr_plane, ndisp, msg_step, disp_step); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void init_data_cost<short>(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, short* disp_selected_pyr, short* data_cost_selected, size_t msg_step, int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float max_data_term, int min_disp, bool use_local_init_data_cost, cudaStream_t stream); template void init_data_cost<float>(const uchar *cleft, const uchar *cright, uchar *ctemp, size_t cimg_step, int rows, int cols, float* disp_selected_pyr, float* data_cost_selected, size_t msg_step, int h, int w, int level, int nr_plane, int ndisp, int channels, float data_weight, float 
max_data_term, int min_disp, bool use_local_init_data_cost, cudaStream_t stream); /////////////////////////////////////////////////////////////// ////////////////////// compute data cost ////////////////////// /////////////////////////////////////////////////////////////// template <typename T, int channels> __global__ void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* selected_disp_pyr, T* data_cost_, int h, int w, int level, int nr_plane, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { int y0 = y << level; int yt = (y + 1) << level; int x0 = x << level; int xt = (x + 1) << level; const T* selected_disparity = selected_disp_pyr + y/2 * msg_step + x/2; T* data_cost = data_cost_ + y * msg_step + x; for(int d = 0; d < nr_plane; d++) { float val = 0.0f; for(int yi = y0; yi < yt; yi++) { for(int xi = x0; xi < xt; xi++) { int sel_disp = selected_disparity[d * disp_step2]; int xr = xi - sel_disp; if (xr < 0 || sel_disp < min_disp) val += data_weight * max_data_term; else { const uchar* left_x = cleft + yi * cimg_step + xi * channels; const uchar* right_x = cright + yi * cimg_step + xr * channels; val += data_weight * pixeldiff<channels>(left_x, right_x, max_data_term); } } } data_cost[disp_step1 * d] = saturate_cast<T>(val); } } } template <typename T, int winsz, int channels> __global__ void compute_data_cost_reduce(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* selected_disp_pyr, T* data_cost_, int level, int rows, int cols, int h, int nr_plane, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2) { int x_out = blockIdx.x; int y_out = blockIdx.y % h; int d = (blockIdx.y / h) * blockDim.z + threadIdx.z; int tid = threadIdx.x; const T* selected_disparity = selected_disp_pyr + y_out/2 * msg_step + x_out/2; T* data_cost = data_cost_ + y_out * msg_step + x_out; if (d < nr_plane) { int sel_disp = selected_disparity[d * disp_step2]; int x0 = x_out << level; int y0 = y_out << level; int len = ::min(y0 + winsz, rows) - y0; float val = 0.0f; if (x0 + tid < cols) { if (x0 + tid - sel_disp < 0 || sel_disp < min_disp) val = data_weight * max_data_term * len; else { const uchar* lle = cleft + y0 * cimg_step + channels * (x0 + tid ); const uchar* lri = cright + y0 * cimg_step + channels * (x0 + tid - sel_disp); for(int y = 0; y < len; ++y) { val += data_weight * pixeldiff<channels>(lle, lri, max_data_term); lle += cimg_step; lri += cimg_step; } } } extern __shared__ float smem[]; reduce<winsz>(smem + winsz * threadIdx.z, val, tid, plus<float>()); if (tid == 0) data_cost[disp_step1 * d] = saturate_cast<T>(val); } } template <typename T> void compute_data_cost_caller_(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, int /*rows*/, int /*cols*/, int h, int w, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); switch(channels) { case 1: compute_data_cost<T, 1><<<grid, threads, 0, stream>>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, h, w, level, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; case 
3: compute_data_cost<T, 3><<<grid, threads, 0, stream>>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, h, w, level, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; case 4: compute_data_cost<T, 4><<<grid, threads, 0, stream>>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, h, w, level, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count"); } } template <typename T, int winsz> void compute_data_cost_reduce_caller_(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, int rows, int cols, int h, int w, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2, cudaStream_t stream) { const int threadsNum = 256; const size_t smem_size = threadsNum * sizeof(float); dim3 threads(winsz, 1, threadsNum / winsz); dim3 grid(w, h, 1); grid.y *= divUp(nr_plane, threads.z); switch (channels) { case 1: compute_data_cost_reduce<T, winsz, 1><<<grid, threads, smem_size, stream>>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; case 3: compute_data_cost_reduce<T, winsz, 3><<<grid, threads, smem_size, stream>>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; case 4: compute_data_cost_reduce<T, winsz, 4><<<grid, threads, smem_size, stream>>>(cleft, cright, cimg_step, disp_selected_pyr, data_cost, level, rows, cols, h, nr_plane, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2); break; default: CV_Error(cv::Error::BadNumChannels, "Unsupported channels count"); } } template<class T> void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, size_t msg_step, int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, cudaStream_t stream) { typedef void (*ComputeDataCostCaller)(const uchar *cleft, const uchar *cright, size_t cimg_step, const T* disp_selected_pyr, T* data_cost, int rows, int cols, int h, int w, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, size_t msg_step, size_t disp_step1, size_t disp_step2, cudaStream_t stream); static const ComputeDataCostCaller callers[] = { compute_data_cost_caller_<T>, compute_data_cost_caller_<T>, compute_data_cost_reduce_caller_<T, 4>, compute_data_cost_reduce_caller_<T, 8>, compute_data_cost_reduce_caller_<T, 16>, compute_data_cost_reduce_caller_<T, 32>, compute_data_cost_reduce_caller_<T, 64>, compute_data_cost_reduce_caller_<T, 128>, compute_data_cost_reduce_caller_<T, 256> }; size_t disp_step1 = msg_step * h; size_t disp_step2 = msg_step * h2; callers[level](cleft, cright, cimg_step, disp_selected_pyr, data_cost, rows, cols, h, w, level, nr_plane, channels, data_weight, max_data_term, min_disp, msg_step, disp_step1, disp_step2, stream); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const short* disp_selected_pyr, short* data_cost, size_t msg_step, int rows, int cols, int h, int w, int h2, 
int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, cudaStream_t stream); template void compute_data_cost(const uchar *cleft, const uchar *cright, size_t cimg_step, const float* disp_selected_pyr, float* data_cost, size_t msg_step, int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, float data_weight, float max_data_term, int min_disp, cudaStream_t stream); /////////////////////////////////////////////////////////////// //////////////////////// init message ///////////////////////// /////////////////////////////////////////////////////////////// template <typename T> __device__ void get_first_k_element_increase(T* u_new, T* d_new, T* l_new, T* r_new, const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur, T* data_cost_selected, T* disparity_selected_new, T* data_cost_new, const T* data_cost_cur, const T* disparity_selected_cur, int nr_plane, int nr_plane2, size_t disp_step1, size_t disp_step2) { for(int i = 0; i < nr_plane; i++) { T minimum = numeric_limits<T>::max(); int id = 0; for(int j = 0; j < nr_plane2; j++) { T cur = data_cost_new[j * disp_step1]; if(cur < minimum) { minimum = cur; id = j; } } data_cost_selected[i * disp_step1] = data_cost_cur[id * disp_step1]; disparity_selected_new[i * disp_step1] = disparity_selected_cur[id * disp_step2]; u_new[i * disp_step1] = u_cur[id * disp_step2]; d_new[i * disp_step1] = d_cur[id * disp_step2]; l_new[i * disp_step1] = l_cur[id * disp_step2]; r_new[i * disp_step1] = r_cur[id * disp_step2]; data_cost_new[id * disp_step1] = numeric_limits<T>::max(); } } template <typename T> __global__ void init_message(uchar *ctemp, T* u_new_, T* d_new_, T* l_new_, T* r_new_, const T* u_cur_, const T* d_cur_, const T* l_cur_, const T* r_cur_, T* selected_disp_pyr_new, const T* selected_disp_pyr_cur, T* data_cost_selected_, const T* data_cost_, int h, int w, int nr_plane, int h2, int w2, int nr_plane2, size_t msg_step, size_t disp_step1, size_t disp_step2) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { const T* u_cur = u_cur_ + ::min(h2-1, y/2 + 1) * msg_step + x/2; const T* d_cur = d_cur_ + ::max(0, y/2 - 1) * msg_step + x/2; const T* l_cur = l_cur_ + (y/2) * msg_step + ::min(w2-1, x/2 + 1); const T* r_cur = r_cur_ + (y/2) * msg_step + ::max(0, x/2 - 1); T* data_cost_new = (T*)ctemp + y * msg_step + x; const T* disparity_selected_cur = selected_disp_pyr_cur + y/2 * msg_step + x/2; const T* data_cost = data_cost_ + y * msg_step + x; for(int d = 0; d < nr_plane2; d++) { int idx2 = d * disp_step2; T val = data_cost[d * disp_step1] + u_cur[idx2] + d_cur[idx2] + l_cur[idx2] + r_cur[idx2]; data_cost_new[d * disp_step1] = val; } T* data_cost_selected = data_cost_selected_ + y * msg_step + x; T* disparity_selected_new = selected_disp_pyr_new + y * msg_step + x; T* u_new = u_new_ + y * msg_step + x; T* d_new = d_new_ + y * msg_step + x; T* l_new = l_new_ + y * msg_step + x; T* r_new = r_new_ + y * msg_step + x; u_cur = u_cur_ + y/2 * msg_step + x/2; d_cur = d_cur_ + y/2 * msg_step + x/2; l_cur = l_cur_ + y/2 * msg_step + x/2; r_cur = r_cur_ + y/2 * msg_step + x/2; get_first_k_element_increase(u_new, d_new, l_new, r_new, u_cur, d_cur, l_cur, r_cur, data_cost_selected, disparity_selected_new, data_cost_new, data_cost, disparity_selected_cur, nr_plane, nr_plane2, disp_step1, disp_step2); } } template<class T> void init_message(uchar *ctemp, T* u_new, T* d_new, T* l_new, T* r_new, const T* u_cur, const T* d_cur, const T* 
l_cur, const T* r_cur, T* selected_disp_pyr_new, const T* selected_disp_pyr_cur, T* data_cost_selected, const T* data_cost, size_t msg_step, int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream) { size_t disp_step1 = msg_step * h; size_t disp_step2 = msg_step * h2; dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x); grid.y = divUp(h, threads.y); init_message<<<grid, threads, 0, stream>>>(ctemp, u_new, d_new, l_new, r_new, u_cur, d_cur, l_cur, r_cur, selected_disp_pyr_new, selected_disp_pyr_cur, data_cost_selected, data_cost, h, w, nr_plane, h2, w2, nr_plane2, msg_step, disp_step1, disp_step2); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void init_message(uchar *ctemp, short* u_new, short* d_new, short* l_new, short* r_new, const short* u_cur, const short* d_cur, const short* l_cur, const short* r_cur, short* selected_disp_pyr_new, const short* selected_disp_pyr_cur, short* data_cost_selected, const short* data_cost, size_t msg_step, int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream); template void init_message(uchar *ctemp, float* u_new, float* d_new, float* l_new, float* r_new, const float* u_cur, const float* d_cur, const float* l_cur, const float* r_cur, float* selected_disp_pyr_new, const float* selected_disp_pyr_cur, float* data_cost_selected, const float* data_cost, size_t msg_step, int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream); /////////////////////////////////////////////////////////////// //////////////////// calc all iterations ///////////////////// /////////////////////////////////////////////////////////////// template <typename T> __device__ void message_per_pixel(const T* data, T* msg_dst, const T* msg1, const T* msg2, const T* msg3, const T* dst_disp, const T* src_disp, int nr_plane, int max_disc_term, float disc_single_jump, volatile T* temp, size_t disp_step) { T minimum = numeric_limits<T>::max(); for(int d = 0; d < nr_plane; d++) { int idx = d * disp_step; T val = data[idx] + msg1[idx] + msg2[idx] + msg3[idx]; if(val < minimum) minimum = val; msg_dst[idx] = val; } float sum = 0; for(int d = 0; d < nr_plane; d++) { float cost_min = minimum + max_disc_term; T src_disp_reg = src_disp[d * disp_step]; for(int d2 = 0; d2 < nr_plane; d2++) cost_min = fmin(cost_min, msg_dst[d2 * disp_step] + disc_single_jump * ::abs(dst_disp[d2 * disp_step] - src_disp_reg)); temp[d * disp_step] = saturate_cast<T>(cost_min); sum += cost_min; } sum /= nr_plane; for(int d = 0; d < nr_plane; d++) msg_dst[d * disp_step] = saturate_cast<T>(temp[d * disp_step] - sum); } template <typename T> __global__ void compute_message(uchar *ctemp, T* u_, T* d_, T* l_, T* r_, const T* data_cost_selected, const T* selected_disp_pyr_cur, int h, int w, int nr_plane, int i, int max_disc_term, float disc_single_jump, size_t msg_step, size_t disp_step) { int y = blockIdx.y * blockDim.y + threadIdx.y; int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + i) & 1); if (y > 0 && y < h - 1 && x > 0 && x < w - 1) { const T* data = data_cost_selected + y * msg_step + x; T* u = u_ + y * msg_step + x; T* d = d_ + y * msg_step + x; T* l = l_ + y * msg_step + x; T* r = r_ + y * msg_step + x; const T* disp = selected_disp_pyr_cur + y * msg_step + x; T* temp = (T*)ctemp + y * msg_step + x; message_per_pixel(data, u, r - 1, u + msg_step, l + 1, disp, disp - msg_step, nr_plane, max_disc_term, disc_single_jump, temp, disp_step); message_per_pixel(data, d, d 
- msg_step, r - 1, l + 1, disp, disp + msg_step, nr_plane, max_disc_term, disc_single_jump, temp, disp_step); message_per_pixel(data, l, u + msg_step, d - msg_step, l + 1, disp, disp - 1, nr_plane, max_disc_term, disc_single_jump, temp, disp_step); message_per_pixel(data, r, u + msg_step, d - msg_step, r - 1, disp, disp + 1, nr_plane, max_disc_term, disc_single_jump, temp, disp_step); } } template<class T> void calc_all_iterations(uchar *ctemp, T* u, T* d, T* l, T* r, const T* data_cost_selected, const T* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, cudaStream_t stream) { size_t disp_step = msg_step * h; dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(w, threads.x << 1); grid.y = divUp(h, threads.y); for(int t = 0; t < iters; ++t) { compute_message<<<grid, threads, 0, stream>>>(ctemp, u, d, l, r, data_cost_selected, selected_disp_pyr_cur, h, w, nr_plane, t & 1, max_disc_term, disc_single_jump, msg_step, disp_step); cudaSafeCall( cudaGetLastError() ); } if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); }; template void calc_all_iterations(uchar *ctemp, short* u, short* d, short* l, short* r, const short* data_cost_selected, const short* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, cudaStream_t stream); template void calc_all_iterations(uchar *ctemp, float* u, float* d, float* l, float* r, const float* data_cost_selected, const float* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, int max_disc_term, float disc_single_jump, cudaStream_t stream); /////////////////////////////////////////////////////////////// /////////////////////////// output //////////////////////////// /////////////////////////////////////////////////////////////// template <typename T> __global__ void compute_disp(const T* u_, const T* d_, const T* l_, const T* r_, const T* data_cost_selected, const T* disp_selected_pyr, PtrStepSz<short> disp, int nr_plane, size_t msg_step, size_t disp_step) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y > 0 && y < disp.rows - 1 && x > 0 && x < disp.cols - 1) { const T* data = data_cost_selected + y * msg_step + x; const T* disp_selected = disp_selected_pyr + y * msg_step + x; const T* u = u_ + (y+1) * msg_step + (x+0); const T* d = d_ + (y-1) * msg_step + (x+0); const T* l = l_ + (y+0) * msg_step + (x+1); const T* r = r_ + (y+0) * msg_step + (x-1); int best = 0; T best_val = numeric_limits<T>::max(); for (int i = 0; i < nr_plane; ++i) { int idx = i * disp_step; T val = data[idx]+ u[idx] + d[idx] + l[idx] + r[idx]; if (val < best_val) { best_val = val; best = saturate_cast<short>(disp_selected[idx]); } } disp(y, x) = best; } } template<class T> void compute_disp(const T* u, const T* d, const T* l, const T* r, const T* data_cost_selected, const T* disp_selected, size_t msg_step, const PtrStepSz<short>& disp, int nr_plane, cudaStream_t stream) { size_t disp_step = disp.rows * msg_step; dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(disp.cols, threads.x); grid.y = divUp(disp.rows, threads.y); compute_disp<<<grid, threads, 0, stream>>>(u, d, l, r, data_cost_selected, disp_selected, disp, nr_plane, msg_step, disp_step); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void compute_disp(const short* u, const short* d, const short* l, const short* r, const short* 
data_cost_selected, const short* disp_selected, size_t msg_step, const PtrStepSz<short>& disp, int nr_plane, cudaStream_t stream); template void compute_disp(const float* u, const float* d, const float* l, const float* r, const float* data_cost_selected, const float* disp_selected, size_t msg_step, const PtrStepSz<short>& disp, int nr_plane, cudaStream_t stream); } // namespace stereocsbp }}} // namespace cv { namespace cuda { namespace cudev { #endif /* CUDA_DISABLER */
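// --- Illustrative sketch (not part of the original file above) -----------------------------
// The non-reduce launchers in stereocsbp repeat the same idiom: a 32x8 thread block, a grid
// sized by ceiling division so the blocks cover the whole w x h image, a launch on the
// caller's stream, a check of cudaGetLastError(), and a cudaDeviceSynchronize() only when the
// blocking default stream was passed. `fill_kernel` below is a hypothetical kernel written
// solely to demonstrate that idiom; div_up is assumed to compute the same ceiling division as
// the divUp helper used above, and cudaSafeCall is the OpenCV error-checking wrapper from
// opencv2/core/cuda/common.hpp.

#include "opencv2/core/cuda/common.hpp"

static inline int div_up(int total, int grain) { return (total + grain - 1) / grain; }

__global__ void fill_kernel(short* out, size_t step, int w, int h, short value)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < w && y < h)
        out[y * step + x] = value;               // step counted in elements, like msg_step above
}

void fill(short* d_out, size_t step, int w, int h, short value, cudaStream_t stream)
{
    dim3 threads(32, 8, 1);                              // block shape used throughout the file
    dim3 grid(div_up(w, threads.x), div_up(h, threads.y), 1);

    fill_kernel<<<grid, threads, 0, stream>>>(d_out, step, w, h, value);

    cudaSafeCall( cudaGetLastError() );                  // catch launch-configuration errors
    if (stream == 0)                                     // no stream supplied: make the call
        cudaSafeCall( cudaDeviceSynchronize() );         // blocking, matching the callers above
}
// --------------------------------------------------------------------------------------------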
f070f0719609a85d6090a7efd1c8e291dc432316.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #include <math.h> #include <vector> #include "CycleTimer.h" #include "crun.h" #include "rutil.h" #define BLOCK_SIZE 16 #define HUB_BLOCK_SIZE 32 #define TRANSITION_PROB 0.02 * 0.1 float toBW(int bytes, float sec) { return (float)(bytes) / (1024. * 1024. * 1024.) / sec; } int *hub_device; bool *mask_device; int *neighbor_device; int *neighbor_start_device; double *initial_load_factor_device; int *rat_count_device; int *healthy_rat_count_device; int *exposed_rat_count_device; int *infectious_rat_count_device; double *weight_result_device; double *sum_weight_result_device; extern "C" void init_cuda(state_t *s) { graph_t *g = s->g; int nnode = g->nnode; int nedge = g->nedge; int nhub = g->nhub; hipMalloc(&hub_device, sizeof(int) * nhub); hipMalloc(&mask_device, sizeof(bool) * nnode); hipMalloc(&neighbor_device, sizeof(int) * (nnode+nedge)); hipMalloc(&neighbor_start_device, sizeof(int) * (nnode+1)); hipMalloc(&initial_load_factor_device, sizeof(double) * nnode); hipMalloc(&rat_count_device, sizeof(int) * nnode); hipMalloc(&healthy_rat_count_device, sizeof(int) * nnode); hipMalloc(&exposed_rat_count_device, sizeof(int) * nnode); hipMalloc(&infectious_rat_count_device, sizeof(int) * nnode); hipMalloc(&weight_result_device, sizeof(double) * nnode); hipMalloc(&sum_weight_result_device, sizeof(double) * nnode); hipMemcpy(rat_count_device, s->rat_count, sizeof(int) * nnode, hipMemcpyHostToDevice); int *temp = (int *)malloc(sizeof(int) * nnode); for (int i = 0; i < nnode; i ++) { temp[i] = s->rat_count[i] - s->exposed_rat_count[i] - s->infectious_rat_count[i]; } hipMemcpy(healthy_rat_count_device, temp, sizeof(int) * nnode, hipMemcpyHostToDevice); free(temp); hipMemcpy(exposed_rat_count_device, s->exposed_rat_count, sizeof(int) * nnode, hipMemcpyHostToDevice); hipMemcpy(infectious_rat_count_device, s->infectious_rat_count, sizeof(int) * nnode, hipMemcpyHostToDevice); hipMemcpy(initial_load_factor_device, s->initial_load_factor, sizeof(double) * nnode, hipMemcpyHostToDevice); hipMemcpy(hub_device, g->hub, sizeof(int) * nhub, hipMemcpyHostToDevice); hipMemcpy(mask_device, g->mask, sizeof(bool) * nnode, hipMemcpyHostToDevice); hipMemcpy(neighbor_device, g->neighbor, sizeof(int) * (nnode+nedge), hipMemcpyHostToDevice); hipMemcpy(neighbor_start_device, g->neighbor_start, sizeof(int) * (nnode+1), hipMemcpyHostToDevice); } extern "C" void clean_cuda() { hipFree(hub_device); hipFree(mask_device); hipFree(neighbor_device); hipFree(neighbor_start_device); hipFree(initial_load_factor_device); hipFree(rat_count_device); hipFree(healthy_rat_count_device); hipFree(exposed_rat_count_device); hipFree(infectious_rat_count_device); hipFree(weight_result_device); hipFree(sum_weight_result_device); } __device__ __inline__ double mweight_kernel(double val, double optval) { double arg = 1.0 + COEFF * (val - optval); double lg = log(arg) * M_LOG2E; double denom = 1.0 + lg * lg; return 1.0/denom; } /* Compute imbalance between local and remote values */ /* Result < 0 when lcount > rcount and > 0 when lcount < rcount */ __device__ __inline__ double imbalance_kernel(int lcount, int rcount) { if (lcount == 0 && rcount == 0) return 0.0; double sl = sqrt((double) lcount); double sr = sqrt((double) rcount); return (sr-sl)/(sr+sl); } __device__ __inline__ double imbalance_density_kernel(double ldensity, double rdensity) { return (rdensity - ldensity) / (rdensity + 
ldensity); } __device__ __inline__ double neighbor_ilf_fast_kernel(double load_factor, double *initial_load_factor, int* share_rat_count, int* share_infectious_rat_count, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int nid = y * width + x; // thread_index is node id int share_block_size = BLOCK_SIZE+2; int in_block_x = threadIdx.x + 1; int in_block_y = threadIdx.y + 1; int in_block_id = in_block_x + in_block_y * share_block_size; double sum = 0.0; double ldensity, rdensity; int remote_x, remote_y, remote_in_block_x, remote_in_block_y, remote_nid, remote_in_block_id; int outdegree = 4; if (x == 0) { outdegree--; } if (y == 0) { outdegree--; } if (x == width -1) { outdegree--; } if (y == height-1) { outdegree--; } ldensity = (share_rat_count[in_block_id] == 0) ? 0.0 : 1.0 * share_infectious_rat_count[in_block_id] / share_rat_count[in_block_id]; //up remote_x = x; remote_y = y+1; remote_nid = remote_x + remote_y * width; remote_in_block_x = in_block_x; remote_in_block_y = in_block_y+1; remote_in_block_id = remote_in_block_x + remote_in_block_y * share_block_size; if (remote_y < height) { rdensity = (share_rat_count[remote_in_block_id] == 0) ? 0.0 : 1.0 * share_infectious_rat_count[remote_in_block_id] / share_rat_count[remote_in_block_id]; double r = (ldensity == 0.0 && rdensity == 0.0) ? 0.0 :imbalance_density_kernel(ldensity, rdensity); sum += r; } //down remote_x = x; remote_y = y-1; remote_nid = remote_x + remote_y * width; remote_in_block_x = in_block_x; remote_in_block_y = in_block_y-1; remote_in_block_id = remote_in_block_x + remote_in_block_y * share_block_size; if (remote_y >= 0) { rdensity = (share_rat_count[remote_in_block_id] == 0) ? 0.0 : 1.0 * share_infectious_rat_count[remote_in_block_id] / share_rat_count[remote_in_block_id]; double r = (ldensity == 0.0 && rdensity == 0.0) ? 0.0 :imbalance_density_kernel(ldensity, rdensity); sum += r; } //left remote_x = x-1; remote_y = y; remote_nid = remote_x + remote_y * width; remote_in_block_x = in_block_x-1; remote_in_block_y = in_block_y; remote_in_block_id = remote_in_block_x + remote_in_block_y * share_block_size; if (remote_x >= 0) { rdensity = (share_rat_count[remote_in_block_id] == 0) ? 0.0 : 1.0 * share_infectious_rat_count[remote_in_block_id] / share_rat_count[remote_in_block_id]; double r = (ldensity == 0.0 && rdensity == 0.0) ? 0.0 :imbalance_density_kernel(ldensity, rdensity); sum += r; } //right remote_x = x+1; remote_y = y; remote_nid = remote_x + remote_y * width; remote_in_block_x = in_block_x+1; remote_in_block_y = in_block_y; remote_in_block_id = remote_in_block_x + remote_in_block_y * share_block_size; if (remote_x <width) { rdensity = (share_rat_count[remote_in_block_id] == 0) ? 0.0 : 1.0 * share_infectious_rat_count[remote_in_block_id] / share_rat_count[remote_in_block_id]; double r = (ldensity == 0.0 && rdensity == 0.0) ? 
0.0 :imbalance_density_kernel(ldensity, rdensity); sum += r; } // change to a new ilf, where each node has different initial base ilf double ilf = BASE_ILF * (initial_load_factor[nid] / load_factor) + ILF_VARIABILITY * (sum / outdegree); return ilf; } __device__ __inline__ double neighbor_ilf_hub_kernel(double load_factor, double *initial_load_factor, int *rat_count, int *infectious_rat_count, int *neighbor, int *neighbor_start, int nid, int max_outdegree) { int outdegree = neighbor_start[nid+1] - neighbor_start[nid] - 1; outdegree = min(outdegree, max_outdegree); int *start = &neighbor[neighbor_start[nid]+1]; int i; double sum = 0.0; for (i = 0; i < outdegree; i++) { double ldensity = (rat_count[nid] == 0) ? 0.0 : 1.0 * infectious_rat_count[nid] / rat_count[nid]; double rdensity = (rat_count[start[i]] == 0) ? 0.0 : 1.0 * infectious_rat_count[start[i]] / rat_count[start[i]]; double r = (ldensity == 0.0 && rdensity == 0.0) ? 0.0 : imbalance_density_kernel(ldensity, rdensity); sum += r; } // change to a new ilf, where each node has different initial base ilf double ilf = BASE_ILF * (initial_load_factor[nid] / load_factor) + ILF_VARIABILITY * (sum / outdegree); return ilf; } __global__ void compute_weight_hub_kernel(double load_factor, double *initial_load_factor, int* hub, int nhub, int *rat_count, int *infectious_rat_count, int *neighbor, int *neighbor_start, double* result) { // compute overall index from position of thread in current block, // and given the block we are in int x = blockIdx.x * blockDim.x + threadIdx.x; if (x < nhub) { int nodeid = hub[x]; double ilf = neighbor_ilf_hub_kernel(load_factor, initial_load_factor, rat_count, infectious_rat_count, neighbor, neighbor_start, nodeid, INT_MAX); // INT_MAX means compute for all possible neighbors int count = rat_count[nodeid]; result[nodeid] = mweight_kernel((double) count/load_factor, ilf); } } __global__ void compute_weight_kernel(bool *mask, double load_factor, double *initial_load_factor, int *rat_count, int *infectious_rat_count, int *neighbor, int *neighbor_start, double* result, int width, int height) { // compute overall index from position of thread in current block, // and given the block we are in int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int thread_index = y * width + x; // thread_index is node id int share_block_size = BLOCK_SIZE+2; int in_block_x = threadIdx.x+1; int in_block_y = threadIdx.y+1; int in_block_id = in_block_x + in_block_y * share_block_size; __shared__ int share_rat_count[(BLOCK_SIZE+2) * (BLOCK_SIZE+2)]; __shared__ int share_infectious_rat_count[(BLOCK_SIZE+2) * (BLOCK_SIZE+2)]; share_rat_count[in_block_id] = rat_count[thread_index]; share_infectious_rat_count[in_block_id] = infectious_rat_count[thread_index]; if (threadIdx.x == 0 && x > 0) { share_rat_count[in_block_id-1] = rat_count[thread_index-1]; share_infectious_rat_count[in_block_id-1] = infectious_rat_count[thread_index-1]; } if (threadIdx.x == BLOCK_SIZE-1 && x < width - 1) { share_rat_count[in_block_id+1] = rat_count[thread_index+1]; share_infectious_rat_count[in_block_id+1] = infectious_rat_count[thread_index+1]; } if (threadIdx.y == 0 && y > 0) { share_rat_count[in_block_id - share_block_size] = rat_count[thread_index - width]; share_infectious_rat_count[in_block_id - share_block_size] = infectious_rat_count[thread_index - width]; } if (threadIdx.y == BLOCK_SIZE-1 && y < height - 1) { share_rat_count[in_block_id + share_block_size] = rat_count[thread_index + width]; 
share_infectious_rat_count[in_block_id + share_block_size] = infectious_rat_count[thread_index + width]; } if (x < width && y < height && mask[thread_index]){ // double ilf = neighbor_ilf_hub_kernel(load_factor, initial_load_factor, rat_count, infectious_rat_count, neighbor, neighbor_start, thread_index, HUB_THREASHOLD); // INT_MAX means compute for all possible neighbors double ilf = neighbor_ilf_fast_kernel(load_factor, initial_load_factor, share_rat_count, share_infectious_rat_count, width, height); int count = share_rat_count[in_block_id]; result[thread_index] = mweight_kernel((double) count/load_factor, ilf); } } __global__ void find_all_sums_hub_kernel(int* hub, int nhub, double *node_weight, int *neighbor, int *neighbor_start, double *sum_weight_result){ int x = blockIdx.x * blockDim.x + threadIdx.x; if (x < nhub) { int nid = hub[x]; double sum = 0.0; for (int eid = neighbor_start[nid]; eid < neighbor_start[nid+1]; eid++) { // this eid is just index of the neighbor in the neighbor array sum += node_weight[neighbor[eid]]; } sum_weight_result[nid] = sum; } } __global__ void find_all_sums_kernel(bool *mask, double *node_weight, int *neighbor, int *neighbor_start, double *sum_weight_result, int width, int height){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int nid = y * width + x; // thread_index is node id if (x < width && y < height && mask[nid]){ double sum = 0.0; int end = min(neighbor_start[nid+1], neighbor_start[nid]+HUB_THREASHOLD+1); //+1 because HUB_THREASHOLD is out degree for (int eid = neighbor_start[nid]; eid < end; eid++) { // this eid is just index of the neighbor in the neighbor array sum += node_weight[neighbor[eid]]; } sum_weight_result[nid] = sum; } } __global__ void next_move_kernel(int *rat_count, int *healthy_rat_count, int *exposed_rat_count, int *infectious_rat_count, double *node_weight, double *sum_weight_result,int *neighbor, int *neighbor_start, int width, int height, double batch_fraction){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int nid = y * width + x; if (x < width && y < height){ for (int eid = neighbor_start[nid]; eid < neighbor_start[nid+1]; eid++) { int remote_node = neighbor[eid]; double move_prob = batch_fraction * node_weight[remote_node] / sum_weight_result[nid]; // check 0 int move_rat = rat_count[nid] * move_prob; int move_healthy = healthy_rat_count[nid] * move_prob; int move_exposed = exposed_rat_count[nid] * move_prob; int move_infectious = infectious_rat_count[nid] * move_prob; atomicAdd(&rat_count[remote_node], move_rat); atomicAdd(&healthy_rat_count[remote_node], move_healthy); atomicAdd(&exposed_rat_count[remote_node], move_exposed); atomicAdd(&infectious_rat_count[remote_node], move_infectious); rat_count[nid] -= move_rat; healthy_rat_count[nid] -= move_healthy; exposed_rat_count[nid] -= move_exposed; infectious_rat_count[nid] -= move_infectious; } } } __device__ __inline__ double compute_single_infection_prob(int alpha, int beta, int rat_count, int exposed_rat_count, int infectious_rat_count) { // note here we only pass values of one node if (rat_count == 0) { return 0.0; } else { double density_of_exposed = exposed_rat_count / rat_count; double density_of_infectious = infectious_rat_count / rat_count; return alpha * density_of_infectious + beta * density_of_exposed; } } __global__ void compute_infection_prob_and_update_status_kernel(int alpha, int beta, int *rat_count, int *healthy_rat_count, int *exposed_rat_count, int 
*infectious_rat_count, int width, int height, double batch_fraction){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int nid = y * width + x; double batch_transition_prob = TRANSITION_PROB * batch_fraction; if (x < width && y < height){ double prob = compute_single_infection_prob(alpha, beta, rat_count[nid], exposed_rat_count[nid], infectious_rat_count[nid]); int num_healthy_to_exposed = healthy_rat_count[nid] * prob; int num_exposed_to_infectious = exposed_rat_count[nid] * batch_transition_prob; int num_infectious_to_recovery = infectious_rat_count[nid] * batch_transition_prob; healthy_rat_count[nid] = healthy_rat_count[nid] - num_healthy_to_exposed; exposed_rat_count[nid] = exposed_rat_count[nid] + num_healthy_to_exposed - num_exposed_to_infectious; infectious_rat_count[nid] = infectious_rat_count[nid] + num_exposed_to_infectious - num_infectious_to_recovery; } } extern "C" void compute_all_weights_cuda(state_t *s){ graph_t *g = s->g; int nnode = g->nnode; int width = g->width; int height = g->height; int nhub = g->nhub; int totalBytes = sizeof(double) * nnode; double startTime = CycleTimer::currentSeconds(); dim3 hubBlockDim(HUB_BLOCK_SIZE); int hub_num_block_x = (nhub + HUB_BLOCK_SIZE - 1) / HUB_BLOCK_SIZE; dim3 hubGridDim(hub_num_block_x); hipLaunchKernelGGL(( compute_weight_hub_kernel), dim3(hubGridDim), dim3(hubBlockDim), 0, 0, s->load_factor, initial_load_factor_device, hub_device, nhub, rat_count_device,infectious_rat_count_device, neighbor_device, neighbor_start_device, weight_result_device); // hipDeviceSynchronize(); // printf("compute_weights: Overall hub: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * (CycleTimer::currentSeconds()-startTime), toBW(totalBytes, (CycleTimer::currentSeconds()-startTime))); dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE, 1); int num_block_x = (width+BLOCK_SIZE-1) / BLOCK_SIZE; int num_block_y = (height+BLOCK_SIZE-1) / BLOCK_SIZE; dim3 gridDim(num_block_x, num_block_y, 1); hipLaunchKernelGGL(( compute_weight_kernel), dim3(gridDim), dim3(blockDim), 0, 0, mask_device, s->load_factor, initial_load_factor_device, rat_count_device, infectious_rat_count_device, neighbor_device, neighbor_start_device, weight_result_device, width, height); hipDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); hipError_t errCode = hipPeekAtLastError(); if (errCode != hipSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode)); } double overallDuration = endTime - startTime; // printf("compute_weights: Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); } extern "C" void find_all_sums_cuda(state_t *s){ graph_t *g = s->g; int nnode = g->nnode; int width = g->width; int height = g->height; int nhub = g->nhub; double startTime = CycleTimer::currentSeconds(); int totalBytes = sizeof(double) * nnode; dim3 hubBlockDim(HUB_BLOCK_SIZE); int hub_num_block_x = (nhub + HUB_BLOCK_SIZE - 1) / HUB_BLOCK_SIZE; dim3 hubGridDim(hub_num_block_x); hipLaunchKernelGGL(( find_all_sums_hub_kernel), dim3(hubGridDim), dim3(hubBlockDim), 0, 0, hub_device, nhub, weight_result_device, neighbor_device, neighbor_start_device, sum_weight_result_device); // hipDeviceSynchronize(); // printf("find_sums: Overall normal node: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * (CycleTimer::currentSeconds()-startTime), toBW(totalBytes, (CycleTimer::currentSeconds()-startTime))); dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE, 1); int num_block_x = (width+BLOCK_SIZE-1) / BLOCK_SIZE; int 
num_block_y = (height+BLOCK_SIZE-1) / BLOCK_SIZE; dim3 gridDim(num_block_x, num_block_y, 1); hipLaunchKernelGGL(( find_all_sums_kernel), dim3(gridDim), dim3(blockDim), 0, 0, mask_device, weight_result_device, neighbor_device, neighbor_start_device, sum_weight_result_device, width, height); hipDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); hipError_t errCode = hipPeekAtLastError(); if (errCode != hipSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode)); } double overallDuration = endTime - startTime; // printf("find_sums: Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); } extern "C" void next_move_cuda(state_t *s, double batch_fraction){ graph_t *g = s->g; int nnode = g->nnode; int width = g->width; int height = g->height; double startTime = CycleTimer::currentSeconds(); int totalBytes = sizeof(double) * nnode; dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE, 1); int num_block_x = (width+BLOCK_SIZE-1) / BLOCK_SIZE; int num_block_y = (height+BLOCK_SIZE-1) / BLOCK_SIZE; dim3 gridDim(num_block_x, num_block_y, 1); hipLaunchKernelGGL(( next_move_kernel), dim3(gridDim), dim3(blockDim), 0, 0, rat_count_device, healthy_rat_count_device, exposed_rat_count_device, infectious_rat_count_device, weight_result_device, sum_weight_result_device, neighbor_device, neighbor_start_device, width, height, batch_fraction); hipDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); hipError_t errCode = hipPeekAtLastError(); if (errCode != hipSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode)); } double overallDuration = endTime - startTime; // printf("find_sums: Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); } extern "C" void compute_infection_prob_and_update_status_cuda(state_t *s, double batch_fraction){ graph_t *g = s->g; int nnode = g->nnode; int width = g->width; int height = g->height; double startTime = CycleTimer::currentSeconds(); int totalBytes = sizeof(double) * nnode; dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE, 1); int num_block_x = (width+BLOCK_SIZE-1) / BLOCK_SIZE; int num_block_y = (height+BLOCK_SIZE-1) / BLOCK_SIZE; dim3 gridDim(num_block_x, num_block_y, 1); hipLaunchKernelGGL(( compute_infection_prob_and_update_status_kernel), dim3(gridDim), dim3(blockDim), 0, 0, s->alpha, s->beta, rat_count_device, healthy_rat_count_device, exposed_rat_count_device, infectious_rat_count_device, width, height, batch_fraction); hipDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); hipError_t errCode = hipPeekAtLastError(); if (errCode != hipSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode)); } double overallDuration = endTime - startTime; // printf("find_sums: Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); } void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; hipError_t err = hipGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { hipDeviceProp_t deviceProps; hipGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / 
(1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
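A note on the per-node infection probability used above (the same compute_single_infection_prob appears again in the paired .cu file below): its densities are quotients of int counters, so the division truncates toward zero. A minimal sketch of the same formula with the counts promoted to double follows; the helper name and the choice of double alpha/beta are ours and not taken from the original sources.

__device__ __inline__ double infection_prob_sketch(double alpha, double beta,
                                                   int rat_count,
                                                   int exposed_rat_count,
                                                   int infectious_rat_count) {
    // Same formula as compute_single_infection_prob above, but with the counts
    // promoted to double so the densities are real-valued instead of truncated.
    if (rat_count == 0) return 0.0;
    double density_of_exposed    = (double) exposed_rat_count    / (double) rat_count;
    double density_of_infectious = (double) infectious_rat_count / (double) rat_count;
    return alpha * density_of_infectious + beta * density_of_exposed;
}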
f070f0719609a85d6090a7efd1c8e291dc432316.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #include <math.h> #include <vector> #include "CycleTimer.h" #include "crun.h" #include "rutil.h" #define BLOCK_SIZE 16 #define HUB_BLOCK_SIZE 32 #define TRANSITION_PROB 0.02 * 0.1 float toBW(int bytes, float sec) { return (float)(bytes) / (1024. * 1024. * 1024.) / sec; } int *hub_device; bool *mask_device; int *neighbor_device; int *neighbor_start_device; double *initial_load_factor_device; int *rat_count_device; int *healthy_rat_count_device; int *exposed_rat_count_device; int *infectious_rat_count_device; double *weight_result_device; double *sum_weight_result_device; extern "C" void init_cuda(state_t *s) { graph_t *g = s->g; int nnode = g->nnode; int nedge = g->nedge; int nhub = g->nhub; cudaMalloc(&hub_device, sizeof(int) * nhub); cudaMalloc(&mask_device, sizeof(bool) * nnode); cudaMalloc(&neighbor_device, sizeof(int) * (nnode+nedge)); cudaMalloc(&neighbor_start_device, sizeof(int) * (nnode+1)); cudaMalloc(&initial_load_factor_device, sizeof(double) * nnode); cudaMalloc(&rat_count_device, sizeof(int) * nnode); cudaMalloc(&healthy_rat_count_device, sizeof(int) * nnode); cudaMalloc(&exposed_rat_count_device, sizeof(int) * nnode); cudaMalloc(&infectious_rat_count_device, sizeof(int) * nnode); cudaMalloc(&weight_result_device, sizeof(double) * nnode); cudaMalloc(&sum_weight_result_device, sizeof(double) * nnode); cudaMemcpy(rat_count_device, s->rat_count, sizeof(int) * nnode, cudaMemcpyHostToDevice); int *temp = (int *)malloc(sizeof(int) * nnode); for (int i = 0; i < nnode; i ++) { temp[i] = s->rat_count[i] - s->exposed_rat_count[i] - s->infectious_rat_count[i]; } cudaMemcpy(healthy_rat_count_device, temp, sizeof(int) * nnode, cudaMemcpyHostToDevice); free(temp); cudaMemcpy(exposed_rat_count_device, s->exposed_rat_count, sizeof(int) * nnode, cudaMemcpyHostToDevice); cudaMemcpy(infectious_rat_count_device, s->infectious_rat_count, sizeof(int) * nnode, cudaMemcpyHostToDevice); cudaMemcpy(initial_load_factor_device, s->initial_load_factor, sizeof(double) * nnode, cudaMemcpyHostToDevice); cudaMemcpy(hub_device, g->hub, sizeof(int) * nhub, cudaMemcpyHostToDevice); cudaMemcpy(mask_device, g->mask, sizeof(bool) * nnode, cudaMemcpyHostToDevice); cudaMemcpy(neighbor_device, g->neighbor, sizeof(int) * (nnode+nedge), cudaMemcpyHostToDevice); cudaMemcpy(neighbor_start_device, g->neighbor_start, sizeof(int) * (nnode+1), cudaMemcpyHostToDevice); } extern "C" void clean_cuda() { cudaFree(hub_device); cudaFree(mask_device); cudaFree(neighbor_device); cudaFree(neighbor_start_device); cudaFree(initial_load_factor_device); cudaFree(rat_count_device); cudaFree(healthy_rat_count_device); cudaFree(exposed_rat_count_device); cudaFree(infectious_rat_count_device); cudaFree(weight_result_device); cudaFree(sum_weight_result_device); } __device__ __inline__ double mweight_kernel(double val, double optval) { double arg = 1.0 + COEFF * (val - optval); double lg = log(arg) * M_LOG2E; double denom = 1.0 + lg * lg; return 1.0/denom; } /* Compute imbalance between local and remote values */ /* Result < 0 when lcount > rcount and > 0 when lcount < rcount */ __device__ __inline__ double imbalance_kernel(int lcount, int rcount) { if (lcount == 0 && rcount == 0) return 0.0; double sl = sqrt((double) lcount); double sr = sqrt((double) rcount); return (sr-sl)/(sr+sl); } __device__ __inline__ double imbalance_density_kernel(double ldensity, double rdensity) { return (rdensity - ldensity) / (rdensity + ldensity); } __device__ __inline__ 
double neighbor_ilf_fast_kernel(double load_factor, double *initial_load_factor, int* share_rat_count, int* share_infectious_rat_count, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int nid = y * width + x; // thread_index is node id int share_block_size = BLOCK_SIZE+2; int in_block_x = threadIdx.x + 1; int in_block_y = threadIdx.y + 1; int in_block_id = in_block_x + in_block_y * share_block_size; double sum = 0.0; double ldensity, rdensity; int remote_x, remote_y, remote_in_block_x, remote_in_block_y, remote_nid, remote_in_block_id; int outdegree = 4; if (x == 0) { outdegree--; } if (y == 0) { outdegree--; } if (x == width -1) { outdegree--; } if (y == height-1) { outdegree--; } ldensity = (share_rat_count[in_block_id] == 0) ? 0.0 : 1.0 * share_infectious_rat_count[in_block_id] / share_rat_count[in_block_id]; //up remote_x = x; remote_y = y+1; remote_nid = remote_x + remote_y * width; remote_in_block_x = in_block_x; remote_in_block_y = in_block_y+1; remote_in_block_id = remote_in_block_x + remote_in_block_y * share_block_size; if (remote_y < height) { rdensity = (share_rat_count[remote_in_block_id] == 0) ? 0.0 : 1.0 * share_infectious_rat_count[remote_in_block_id] / share_rat_count[remote_in_block_id]; double r = (ldensity == 0.0 && rdensity == 0.0) ? 0.0 :imbalance_density_kernel(ldensity, rdensity); sum += r; } //down remote_x = x; remote_y = y-1; remote_nid = remote_x + remote_y * width; remote_in_block_x = in_block_x; remote_in_block_y = in_block_y-1; remote_in_block_id = remote_in_block_x + remote_in_block_y * share_block_size; if (remote_y >= 0) { rdensity = (share_rat_count[remote_in_block_id] == 0) ? 0.0 : 1.0 * share_infectious_rat_count[remote_in_block_id] / share_rat_count[remote_in_block_id]; double r = (ldensity == 0.0 && rdensity == 0.0) ? 0.0 :imbalance_density_kernel(ldensity, rdensity); sum += r; } //left remote_x = x-1; remote_y = y; remote_nid = remote_x + remote_y * width; remote_in_block_x = in_block_x-1; remote_in_block_y = in_block_y; remote_in_block_id = remote_in_block_x + remote_in_block_y * share_block_size; if (remote_x >= 0) { rdensity = (share_rat_count[remote_in_block_id] == 0) ? 0.0 : 1.0 * share_infectious_rat_count[remote_in_block_id] / share_rat_count[remote_in_block_id]; double r = (ldensity == 0.0 && rdensity == 0.0) ? 0.0 :imbalance_density_kernel(ldensity, rdensity); sum += r; } //right remote_x = x+1; remote_y = y; remote_nid = remote_x + remote_y * width; remote_in_block_x = in_block_x+1; remote_in_block_y = in_block_y; remote_in_block_id = remote_in_block_x + remote_in_block_y * share_block_size; if (remote_x <width) { rdensity = (share_rat_count[remote_in_block_id] == 0) ? 0.0 : 1.0 * share_infectious_rat_count[remote_in_block_id] / share_rat_count[remote_in_block_id]; double r = (ldensity == 0.0 && rdensity == 0.0) ? 
0.0 :imbalance_density_kernel(ldensity, rdensity); sum += r; } // change to a new ilf, where each node has different initial base ilf double ilf = BASE_ILF * (initial_load_factor[nid] / load_factor) + ILF_VARIABILITY * (sum / outdegree); return ilf; } __device__ __inline__ double neighbor_ilf_hub_kernel(double load_factor, double *initial_load_factor, int *rat_count, int *infectious_rat_count, int *neighbor, int *neighbor_start, int nid, int max_outdegree) { int outdegree = neighbor_start[nid+1] - neighbor_start[nid] - 1; outdegree = min(outdegree, max_outdegree); int *start = &neighbor[neighbor_start[nid]+1]; int i; double sum = 0.0; for (i = 0; i < outdegree; i++) { double ldensity = (rat_count[nid] == 0) ? 0.0 : 1.0 * infectious_rat_count[nid] / rat_count[nid]; double rdensity = (rat_count[start[i]] == 0) ? 0.0 : 1.0 * infectious_rat_count[start[i]] / rat_count[start[i]]; double r = (ldensity == 0.0 && rdensity == 0.0) ? 0.0 : imbalance_density_kernel(ldensity, rdensity); sum += r; } // change to a new ilf, where each node has different initial base ilf double ilf = BASE_ILF * (initial_load_factor[nid] / load_factor) + ILF_VARIABILITY * (sum / outdegree); return ilf; } __global__ void compute_weight_hub_kernel(double load_factor, double *initial_load_factor, int* hub, int nhub, int *rat_count, int *infectious_rat_count, int *neighbor, int *neighbor_start, double* result) { // compute overall index from position of thread in current block, // and given the block we are in int x = blockIdx.x * blockDim.x + threadIdx.x; if (x < nhub) { int nodeid = hub[x]; double ilf = neighbor_ilf_hub_kernel(load_factor, initial_load_factor, rat_count, infectious_rat_count, neighbor, neighbor_start, nodeid, INT_MAX); // INT_MAX means compute for all possible neighbors int count = rat_count[nodeid]; result[nodeid] = mweight_kernel((double) count/load_factor, ilf); } } __global__ void compute_weight_kernel(bool *mask, double load_factor, double *initial_load_factor, int *rat_count, int *infectious_rat_count, int *neighbor, int *neighbor_start, double* result, int width, int height) { // compute overall index from position of thread in current block, // and given the block we are in int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int thread_index = y * width + x; // thread_index is node id int share_block_size = BLOCK_SIZE+2; int in_block_x = threadIdx.x+1; int in_block_y = threadIdx.y+1; int in_block_id = in_block_x + in_block_y * share_block_size; __shared__ int share_rat_count[(BLOCK_SIZE+2) * (BLOCK_SIZE+2)]; __shared__ int share_infectious_rat_count[(BLOCK_SIZE+2) * (BLOCK_SIZE+2)]; share_rat_count[in_block_id] = rat_count[thread_index]; share_infectious_rat_count[in_block_id] = infectious_rat_count[thread_index]; if (threadIdx.x == 0 && x > 0) { share_rat_count[in_block_id-1] = rat_count[thread_index-1]; share_infectious_rat_count[in_block_id-1] = infectious_rat_count[thread_index-1]; } if (threadIdx.x == BLOCK_SIZE-1 && x < width - 1) { share_rat_count[in_block_id+1] = rat_count[thread_index+1]; share_infectious_rat_count[in_block_id+1] = infectious_rat_count[thread_index+1]; } if (threadIdx.y == 0 && y > 0) { share_rat_count[in_block_id - share_block_size] = rat_count[thread_index - width]; share_infectious_rat_count[in_block_id - share_block_size] = infectious_rat_count[thread_index - width]; } if (threadIdx.y == BLOCK_SIZE-1 && y < height - 1) { share_rat_count[in_block_id + share_block_size] = rat_count[thread_index + width]; 
share_infectious_rat_count[in_block_id + share_block_size] = infectious_rat_count[thread_index + width]; } if (x < width && y < height && mask[thread_index]){ // double ilf = neighbor_ilf_hub_kernel(load_factor, initial_load_factor, rat_count, infectious_rat_count, neighbor, neighbor_start, thread_index, HUB_THREASHOLD); // INT_MAX means compute for all possible neighbors double ilf = neighbor_ilf_fast_kernel(load_factor, initial_load_factor, share_rat_count, share_infectious_rat_count, width, height); int count = share_rat_count[in_block_id]; result[thread_index] = mweight_kernel((double) count/load_factor, ilf); } } __global__ void find_all_sums_hub_kernel(int* hub, int nhub, double *node_weight, int *neighbor, int *neighbor_start, double *sum_weight_result){ int x = blockIdx.x * blockDim.x + threadIdx.x; if (x < nhub) { int nid = hub[x]; double sum = 0.0; for (int eid = neighbor_start[nid]; eid < neighbor_start[nid+1]; eid++) { // this eid is just index of the neighbor in the neighbor array sum += node_weight[neighbor[eid]]; } sum_weight_result[nid] = sum; } } __global__ void find_all_sums_kernel(bool *mask, double *node_weight, int *neighbor, int *neighbor_start, double *sum_weight_result, int width, int height){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int nid = y * width + x; // thread_index is node id if (x < width && y < height && mask[nid]){ double sum = 0.0; int end = min(neighbor_start[nid+1], neighbor_start[nid]+HUB_THREASHOLD+1); //+1 because HUB_THREASHOLD is out degree for (int eid = neighbor_start[nid]; eid < end; eid++) { // this eid is just index of the neighbor in the neighbor array sum += node_weight[neighbor[eid]]; } sum_weight_result[nid] = sum; } } __global__ void next_move_kernel(int *rat_count, int *healthy_rat_count, int *exposed_rat_count, int *infectious_rat_count, double *node_weight, double *sum_weight_result,int *neighbor, int *neighbor_start, int width, int height, double batch_fraction){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int nid = y * width + x; if (x < width && y < height){ for (int eid = neighbor_start[nid]; eid < neighbor_start[nid+1]; eid++) { int remote_node = neighbor[eid]; double move_prob = batch_fraction * node_weight[remote_node] / sum_weight_result[nid]; // check 0 int move_rat = rat_count[nid] * move_prob; int move_healthy = healthy_rat_count[nid] * move_prob; int move_exposed = exposed_rat_count[nid] * move_prob; int move_infectious = infectious_rat_count[nid] * move_prob; atomicAdd(&rat_count[remote_node], move_rat); atomicAdd(&healthy_rat_count[remote_node], move_healthy); atomicAdd(&exposed_rat_count[remote_node], move_exposed); atomicAdd(&infectious_rat_count[remote_node], move_infectious); rat_count[nid] -= move_rat; healthy_rat_count[nid] -= move_healthy; exposed_rat_count[nid] -= move_exposed; infectious_rat_count[nid] -= move_infectious; } } } __device__ __inline__ double compute_single_infection_prob(int alpha, int beta, int rat_count, int exposed_rat_count, int infectious_rat_count) { // note here we only pass values of one node if (rat_count == 0) { return 0.0; } else { double density_of_exposed = exposed_rat_count / rat_count; double density_of_infectious = infectious_rat_count / rat_count; return alpha * density_of_infectious + beta * density_of_exposed; } } __global__ void compute_infection_prob_and_update_status_kernel(int alpha, int beta, int *rat_count, int *healthy_rat_count, int *exposed_rat_count, int 
*infectious_rat_count, int width, int height, double batch_fraction){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int nid = y * width + x; double batch_transition_prob = TRANSITION_PROB * batch_fraction; if (x < width && y < height){ double prob = compute_single_infection_prob(alpha, beta, rat_count[nid], exposed_rat_count[nid], infectious_rat_count[nid]); int num_healthy_to_exposed = healthy_rat_count[nid] * prob; int num_exposed_to_infectious = exposed_rat_count[nid] * batch_transition_prob; int num_infectious_to_recovery = infectious_rat_count[nid] * batch_transition_prob; healthy_rat_count[nid] = healthy_rat_count[nid] - num_healthy_to_exposed; exposed_rat_count[nid] = exposed_rat_count[nid] + num_healthy_to_exposed - num_exposed_to_infectious; infectious_rat_count[nid] = infectious_rat_count[nid] + num_exposed_to_infectious - num_infectious_to_recovery; } } extern "C" void compute_all_weights_cuda(state_t *s){ graph_t *g = s->g; int nnode = g->nnode; int width = g->width; int height = g->height; int nhub = g->nhub; int totalBytes = sizeof(double) * nnode; double startTime = CycleTimer::currentSeconds(); dim3 hubBlockDim(HUB_BLOCK_SIZE); int hub_num_block_x = (nhub + HUB_BLOCK_SIZE - 1) / HUB_BLOCK_SIZE; dim3 hubGridDim(hub_num_block_x); compute_weight_hub_kernel<<<hubGridDim, hubBlockDim>>>(s->load_factor, initial_load_factor_device, hub_device, nhub, rat_count_device,infectious_rat_count_device, neighbor_device, neighbor_start_device, weight_result_device); // cudaThreadSynchronize(); // printf("compute_weights: Overall hub: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * (CycleTimer::currentSeconds()-startTime), toBW(totalBytes, (CycleTimer::currentSeconds()-startTime))); dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE, 1); int num_block_x = (width+BLOCK_SIZE-1) / BLOCK_SIZE; int num_block_y = (height+BLOCK_SIZE-1) / BLOCK_SIZE; dim3 gridDim(num_block_x, num_block_y, 1); compute_weight_kernel<<<gridDim, blockDim>>>(mask_device, s->load_factor, initial_load_factor_device, rat_count_device, infectious_rat_count_device, neighbor_device, neighbor_start_device, weight_result_device, width, height); cudaThreadSynchronize(); double endTime = CycleTimer::currentSeconds(); cudaError_t errCode = cudaPeekAtLastError(); if (errCode != cudaSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode)); } double overallDuration = endTime - startTime; // printf("compute_weights: Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); } extern "C" void find_all_sums_cuda(state_t *s){ graph_t *g = s->g; int nnode = g->nnode; int width = g->width; int height = g->height; int nhub = g->nhub; double startTime = CycleTimer::currentSeconds(); int totalBytes = sizeof(double) * nnode; dim3 hubBlockDim(HUB_BLOCK_SIZE); int hub_num_block_x = (nhub + HUB_BLOCK_SIZE - 1) / HUB_BLOCK_SIZE; dim3 hubGridDim(hub_num_block_x); find_all_sums_hub_kernel<<<hubGridDim, hubBlockDim>>>(hub_device, nhub, weight_result_device, neighbor_device, neighbor_start_device, sum_weight_result_device); // cudaThreadSynchronize(); // printf("find_sums: Overall normal node: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * (CycleTimer::currentSeconds()-startTime), toBW(totalBytes, (CycleTimer::currentSeconds()-startTime))); dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE, 1); int num_block_x = (width+BLOCK_SIZE-1) / BLOCK_SIZE; int num_block_y = (height+BLOCK_SIZE-1) / BLOCK_SIZE; dim3 gridDim(num_block_x, num_block_y, 1); 
find_all_sums_kernel<<<gridDim, blockDim>>>(mask_device, weight_result_device, neighbor_device, neighbor_start_device, sum_weight_result_device, width, height); cudaThreadSynchronize(); double endTime = CycleTimer::currentSeconds(); cudaError_t errCode = cudaPeekAtLastError(); if (errCode != cudaSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode)); } double overallDuration = endTime - startTime; // printf("find_sums: Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); } extern "C" void next_move_cuda(state_t *s, double batch_fraction){ graph_t *g = s->g; int nnode = g->nnode; int width = g->width; int height = g->height; double startTime = CycleTimer::currentSeconds(); int totalBytes = sizeof(double) * nnode; dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE, 1); int num_block_x = (width+BLOCK_SIZE-1) / BLOCK_SIZE; int num_block_y = (height+BLOCK_SIZE-1) / BLOCK_SIZE; dim3 gridDim(num_block_x, num_block_y, 1); next_move_kernel<<<gridDim, blockDim>>>(rat_count_device, healthy_rat_count_device, exposed_rat_count_device, infectious_rat_count_device, weight_result_device, sum_weight_result_device, neighbor_device, neighbor_start_device, width, height, batch_fraction); cudaThreadSynchronize(); double endTime = CycleTimer::currentSeconds(); cudaError_t errCode = cudaPeekAtLastError(); if (errCode != cudaSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode)); } double overallDuration = endTime - startTime; // printf("find_sums: Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); } extern "C" void compute_infection_prob_and_update_status_cuda(state_t *s, double batch_fraction){ graph_t *g = s->g; int nnode = g->nnode; int width = g->width; int height = g->height; double startTime = CycleTimer::currentSeconds(); int totalBytes = sizeof(double) * nnode; dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE, 1); int num_block_x = (width+BLOCK_SIZE-1) / BLOCK_SIZE; int num_block_y = (height+BLOCK_SIZE-1) / BLOCK_SIZE; dim3 gridDim(num_block_x, num_block_y, 1); compute_infection_prob_and_update_status_kernel<<<gridDim, blockDim>>>(s->alpha, s->beta, rat_count_device, healthy_rat_count_device, exposed_rat_count_device, infectious_rat_count_device, width, height, batch_fraction); cudaThreadSynchronize(); double endTime = CycleTimer::currentSeconds(); cudaError_t errCode = cudaPeekAtLastError(); if (errCode != cudaSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode)); } double overallDuration = endTime - startTime; // printf("find_sums: Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); } void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; cudaError_t err = cudaGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { cudaDeviceProp deviceProps; cudaGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
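Each host wrapper above repeats the same launch / synchronize / error-check sequence. A small helper along the following lines could factor that out; the function name is hypothetical, and cudaDeviceSynchronize() is used because cudaThreadSynchronize() is the deprecated spelling, which the hipified copy of this file already maps to hipDeviceSynchronize().

#include <cstdio>
#include <cuda_runtime.h>

// Hedged sketch: the post-launch check repeated in every wrapper above, in one place.
// The helper name is hypothetical and not part of the original sources.
static void sync_and_report(const char *where) {
    cudaDeviceSynchronize();                      // non-deprecated replacement for cudaThreadSynchronize()
    cudaError_t errCode = cudaPeekAtLastError();  // same check the wrappers perform inline
    if (errCode != cudaSuccess) {
        fprintf(stderr, "WARNING: A CUDA error occurred in %s: code=%d, %s\n",
                where, errCode, cudaGetErrorString(errCode));
    }
}
// Possible usage inside compute_all_weights_cuda:
//   compute_weight_kernel<<<gridDim, blockDim>>>(...);
//   sync_and_report("compute_weights");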
b7221afa3d336b6b1db4d2637edd7867c6d1e7c6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; 
} __device__ hipComplex tans(hipComplex m) { return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { 
qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * 
cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } /* derivative of helva, from Mathematica */ __device__ hipComplex helvp(hipComplex z) { hipComplex out(jnf(2,z.r),jnf(1,z.i)); return out; } __device__ hipComplex lanna(hipComplex z) { hipComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r))); return out; } __device__ hipComplex harva(hipComplex z) { hipComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i)); return out; } __device__ hipComplex herve(hipComplex z) { hipComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r)); return out; } __device__ hipComplex alver(hipComplex z) { hipComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex alvir(hipComplex z) { hipComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex hexva(int m, hipComplex z) { hipComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex halvi(hipComplex z) { hipComplex out(j1f(z.r),-j0f(z.i)); return out; } __device__ hipComplex ahilv(hipComplex z) { hipComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ hipComplex halva(hipComplex z) { hipComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ hipComplex aciwa(hipComplex z) { hipComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex aliva(hipComplex z) { hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ hipComplex ariva(hipComplex z) { hipComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); 
hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * helva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thy(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex origo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(alvir(q*z),alvir(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale =10.1; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); //hipComplex tin(1/4096.0,0.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(1.0,0.0); hipComplex eccume(0.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = cue; hipComplex vue = cue*ai; hipComplex lue = unity; hipComplex rhuva(3.0,0.0); hipComplex rarva(3.0,0.0); hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex 
rhus(1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant //cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); for(v=0;v<40;v++) { cue = cue - (expc(hilva(cue)*aon - ai*hinva(cue)*uon))/(hilva(cue)*aon - ai*hinva(cue)*uon); if(v %5==0) { accume = powc(aon,accume) * (expc(hilva(cue)*aon - ai*hinva(cue)*uon)); } else { accume = powc(accume,uon) * (expc(hilva(cue)*uon + ai*hinva(cue)*aon)); } } cue = accume; /*cue =cue - powc(conj(cue),conj(cue-aon*conj(cue)))-powc(conj(cue),conj(cue-uon*conj(cue)));*/ double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; if(norg(cue)>200) { d_out[i].x = 0; d_out[i].y = 0; d_out[i].z = 0; d_out[i].w = 255; } } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
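The powc routine above evaluates a complex power z^w as exp(w log z) expanded by hand: with z = r·e^(i·theta) and w = c + d·i, the "mesp" factor is r^c·(cos(d·ln r) + i·sin(d·ln r)) and the "frim" factor is e^(-d·theta)·(cos(c·theta) + i·sin(c·theta)). A short host-side reference using std::complex, useful only for spot-checking a few device values, might look like this; the helper name is ours.

#include <complex>
#include <cstdio>

// Hedged sketch: CPU reference for powc via the standard library's principal branch.
static std::complex<float> powc_reference(std::complex<float> z, std::complex<float> w) {
    return std::exp(w * std::log(z)); // z^w = exp(w * log z), the identity powc expands by hand
}

int main() {
    std::complex<float> z(0.5f, 0.25f), w(2.0f, -1.0f);
    std::complex<float> p = powc_reference(z, w);
    std::printf("z^w = %f + %fi\n", p.real(), p.imag());
    return 0;
}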
b7221afa3d336b6b1db4d2637edd7867c6d1e7c6.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); 
cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex 
out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about 
whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex helva(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } /* derivative of helva, from Mathematica */ __device__ cuComplex helvp(cuComplex z) { cuComplex out(jnf(2,z.r),jnf(1,z.i)); return out; } __device__ cuComplex lanna(cuComplex z) { cuComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r))); return out; } __device__ cuComplex harva(cuComplex z) { cuComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i)); return out; } __device__ cuComplex herve(cuComplex z) { cuComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r)); return out; } __device__ cuComplex alver(cuComplex z) { cuComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex alvir(cuComplex z) { cuComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex hexva(int m, cuComplex z) { cuComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ cuComplex hilva(cuComplex z) { cuComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ cuComplex halvi(cuComplex z) { cuComplex out(j1f(z.r),-j0f(z.i)); return out; } __device__ cuComplex ahilv(cuComplex z) { cuComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ cuComplex halva(cuComplex z) { cuComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ cuComplex aciwa(cuComplex z) { cuComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ cuComplex hinva(cuComplex z) { cuComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ cuComplex henga(cuComplex z) { cuComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ cuComplex holva(cuComplex z) { cuComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ cuComplex aliva(cuComplex z) { cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ cuComplex ariva(cuComplex z) { cuComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex irigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * helva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thy(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * 
conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ cuComplex urigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex origo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(alvir(q*z),alvir(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale =10.1; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); //cuComplex tin(1/4096.0,0.0); cuComplex aon = expc(ai*moux); cuComplex uon= expc(mouy); cuComplex flurn(0.0,0.0); cuComplex accume(1.0,0.0); cuComplex eccume(0.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(1.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); cuComplex mrun(1/2.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = cue; cuComplex vue = cue*ai; cuComplex lue = unity; cuComplex rhuva(3.0,0.0); cuComplex rarva(3.0,0.0); cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant 
//cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); for(v=0;v<40;v++) { cue = cue - (expc(hilva(cue)*aon - ai*hinva(cue)*uon))/(hilva(cue)*aon - ai*hinva(cue)*uon); if(v %5==0) { accume = powc(aon,accume) * (expc(hilva(cue)*aon - ai*hinva(cue)*uon)); } else { accume = powc(accume,uon) * (expc(hilva(cue)*uon + ai*hinva(cue)*aon)); } } cue = accume; /*cue =cue - powc(conj(cue),conj(cue-aon*conj(cue)))-powc(conj(cue),conj(cue-uon*conj(cue)));*/ double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; if(norg(cue)>200) { d_out[i].x = 0; d_out[i].y = 0; d_out[i].z = 0; d_out[i].w = 255; } } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
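Only the CUDA side of this record is shown above. For symmetry with the other entries in this collection, the hipified launcher would presumably look like the sketch below (an assumption based on the hipLaunchKernelGGL convention the surrounding .hip files use; distanceKernel, TX and TY are the ones defined in the file above):

// Presumed hipify output for kernelLauncher (sketch, not part of the original record).
// hipify prepends the HIP runtime header and rewrites the <<<...>>> launch.
#include "hip/hip_runtime.h"
#include "kernel.h"

void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
    const dim3 blockSize(TX, TY);
    const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
    // 0 bytes of dynamic shared memory, default (null) stream
    hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
}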
b1c8249233d9965a660952c4ffce41f036ff0195.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_CUDNN #include <vector> #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { __global__ void sync_conv_groups() { } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { cudnnConvolutionFwdAlgo_t algo; // pick the convolution algorithm // TODO(shelhamer) this should be done during reshape // TODO(shelhamer) the choice of automatic or manual algorithm picking // should be exposed in proto CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(handle_[g], bottom_descs_[i], filter_desc_, conv_descs_[i], top_descs_[i], CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, // memoryLimitInBytes, &algo)); // get minimum size of the workspace needed for the desired algorithm size_t workspaceSizeInBytes_temp = 0; CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle_[g], bottom_descs_[i], filter_desc_, conv_descs_[i], top_descs_[i], algo, &workspaceSizeInBytes)); if (workspaceSizeInBytes_temp > workspaceSizeInBytes) { workspaceSizeInBytes = workspaceSizeInBytes_temp; // free the existing workspace and allocate a new (larger) one hipFree(this->workspace); hipMalloc(&(this->workspace), workspaceSizeInBytes); } // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, weight + weight_offset_ * g, conv_descs_[i], algo, workspace, workspaceSizeInBytes, cudnn::dataType<Dtype>::zero, top_descs_[i], top_data + top_offset_ * g)); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C, cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, ); } } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. 
if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], cudnn::dataType<Dtype>::one, filter_desc_, weight_diff + weight_offset_ * g)); } // Gradient w.r.t. bottom data. if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g], cudnn::dataType<Dtype>::one, filter_desc_, weight + weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, ); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer); } // namespace caffe #endif
b1c8249233d9965a660952c4ffce41f036ff0195.cu
#ifdef USE_CUDNN #include <vector> #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { __global__ void sync_conv_groups() { } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { cudnnConvolutionFwdAlgo_t algo; // pick the convolution algorithm // TODO(shelhamer) this should be done during reshape // TODO(shelhamer) the choice of automatic or manual algorithm picking // should be exposed in proto CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(handle_[g], bottom_descs_[i], filter_desc_, conv_descs_[i], top_descs_[i], CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, // memoryLimitInBytes, &algo)); // get minimum size of the workspace needed for the desired algorithm size_t workspaceSizeInBytes_temp = 0; CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle_[g], bottom_descs_[i], filter_desc_, conv_descs_[i], top_descs_[i], algo, &workspaceSizeInBytes)); if (workspaceSizeInBytes_temp > workspaceSizeInBytes) { workspaceSizeInBytes = workspaceSizeInBytes_temp; // free the existing workspace and allocate a new (larger) one cudaFree(this->workspace); cudaMalloc(&(this->workspace), workspaceSizeInBytes); } // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, weight + weight_offset_ * g, conv_descs_[i], algo, workspace, workspaceSizeInBytes, cudnn::dataType<Dtype>::zero, top_descs_[i], top_data + top_offset_ * g)); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C, cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_conv_groups<<<1, 1>>>(); } } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. 
if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], cudnn::dataType<Dtype>::one, filter_desc_, weight_diff + weight_offset_ * g)); } // Gradient w.r.t. bottom data. if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g], cudnn::dataType<Dtype>::one, filter_desc_, weight + weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_conv_groups<<<1, 1>>>(); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer); } // namespace caffe #endif
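Both the .hip and .cu versions of this layer query the forward workspace size into workspaceSizeInBytes while declaring (and then comparing against) workspaceSizeInBytes_temp, so the temporary stays 0 and the free-and-reallocate branch can never fire. The sketch below shows what the query was presumably meant to do (CUDA spelling; the .hip entry would use hipFree/hipMalloc):

// Presumably intended workspace handling in Forward_gpu (sketch):
// query into the temporary, then grow the cached workspace if needed.
size_t workspaceSizeInBytes_temp = 0;
CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle_[g],
    bottom_descs_[i], filter_desc_, conv_descs_[i], top_descs_[i],
    algo, &workspaceSizeInBytes_temp));
if (workspaceSizeInBytes_temp > workspaceSizeInBytes) {
    workspaceSizeInBytes = workspaceSizeInBytes_temp;
    // free the existing workspace and allocate a new (larger) one
    cudaFree(this->workspace);
    cudaMalloc(&(this->workspace), workspaceSizeInBytes);
}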
01bf13bece0dcbec0a0a36c41340761a1c0ad438.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include "metropolis_cuda.cuh" /* Raandomly shuffle the path */ __device__ void shuffle(int *path, hiprandState_t localState, int path_length) { for (int i = 1; i < (path_length - 1); i++) { int j = int(hiprand_uniform(&localState) * float(path_length - 3.0) + 1.0); int temp = path[j]; path[j] = path[i]; path[i] = temp; } } /* Calculate the energy of a given path with the given distance/cost matrix */ __device__ float calculate_energy(int *path, float *dist_matrix, int num_cities, int path_length) { float E = 0.0; for (int i = 0; i < (path_length - 1); i++) { E += dist_matrix[path[i] * num_cities + path[i+1]]; } return E; } /* Given a path, distance/cost matrix, the old energy of this path, the two indices between which all cities will be reversed, the number of cities, and the path_length, compute the new energy if we were to perform the reversal. */ __device__ float update_energy(int *path, float *dist_matrix, float E_old, int idx1, int idx2, int num_cities, int path_length) { float E_new = E_old; if (idx1 != idx2) { int start_city = path[idx1]; int end_city = path[idx2]; E_new -= dist_matrix[path[idx1-1] * num_cities + start_city]; E_new -= dist_matrix[end_city * num_cities + path[idx2+1]]; E_new += dist_matrix[path[idx1-1] * num_cities + path[idx2]]; E_new += dist_matrix[path[idx1] * num_cities + path[idx2+1]]; } return E_new; } __device__ float calc_pi_ratio(float E_old, float E_new, float temp) { return exp(-1.0 / temp * (E_new - E_old)); } /* Arguments: path is an integer array of size num_simulations * num_cities, that contains the paths for each of the simulations we are running dist_matrix is a float array of size num_cities * num_cities, that contains all of the distance information between cities states is a hiprandState_t array of size num_simulations that contains the states of all of the PRNGs energies is a float array of size num_simulations that contains the energy/cost values for all of our simulations temperatures is a float array of size num_simulations that contains the current temperature for each simulation num_cities is how many cities we have path_length is the length of our path num_simulations is how many simulations we are running This kernel performs one iteration of the simulated annealing algorithm for each of our simulations. 
*/ __global__ void cudaParallelMetropolisKernel(int *path, float *dist_matrix, hiprandState_t *states, float *energies, float *temperatures, int num_cities, int path_length, int num_simulations) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; while (index < num_simulations) { float E_old = energies[index]; float temp = temperatures[index]; int *path_new = path + index * path_length; // Figure out which two indices in the path are going to be reversed int idx1 = int(hiprand_uniform(&states[index]) * float(path_length - 3.0) + 1.0); int idx2 = int(hiprand_uniform(&states[index]) * float(path_length - 3.0) + 1.0); int a = min(idx1, idx2); int b = max(idx1, idx2); // Calculate energy of new path with hypothetical reversal float E_new = update_energy(path_new, dist_matrix, E_old, a, b, num_cities, path_length); // Calculate energy ratio of old & new path float check = min(float(1.0), calc_pi_ratio(E_old, E_new, temp)); // Generate random number to see if we accept float u = hiprand_uniform(&states[index]); if (u < check) { // If accept, change temperature & energy & actually change path temperatures[index] = 0.9999 * temp; energies[index] = E_new; // Reverse portion of path int t; while (a < b) { t = path_new[a]; path_new[a] = path_new[b]; path_new[b] = t; a++; b--; } } index += blockDim.x * gridDim.x; } } /* Given a start and end index, initializes a random path */ __device__ void initializePath(int *path, int start, int end, hiprandState_t localState, int path_length, int num_simulations) { // Initialize path to just sequence of cities with given start & end path[0] = start; int actual_counter = 1; for (int i = 0; i < path_length; i++) { if (i != start && i != end) { path[actual_counter] = i; actual_counter++; } } path[path_length - 1] = end; // Perform shuffles of path to initialize a random path for (int i = 0; i < 50; i++) { shuffle(path, localState, path_length); } } // Initializes random initial paths, energies, temperatures, and hiprand states __global__ void cudaInitializeDataKernel(int *path, float *dist_matrix, hiprandState_t *states, float *energies, float *temperatures, float init_temp, int start, int end, int num_cities, int path_length, int num_simulations) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; while (index < num_simulations) { // Initialize hiprand state for this index hiprand_init(index, 0, 0, &states[index]); // Initialize random path initializePath(path + index * path_length, start, end, states[index], path_length, num_simulations); // Intiialize energy associated with path energies[index] = calculate_energy(path + index * path_length, dist_matrix, num_cities, path_length); // Initialize annealing temperature temperatures[index] = init_temp; index += blockDim.x * gridDim.x; } } void cudaCallParallelMetropolisKernel(const unsigned int blocks, const unsigned int threadsPerBlock, int *path, float *dist_matrix, hiprandState_t *states, float *energies, float *temperatures, int num_cities, int path_length, int num_simulations) { hipLaunchKernelGGL(( cudaParallelMetropolisKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, path, dist_matrix, states, energies, temperatures, num_cities, path_length, num_simulations); } void cudaCallInitializeDataKernel(const unsigned int blocks, const unsigned int threadsPerBlock, int *path, float *dist_matrix, hiprandState_t *states, float *energies, float *temperatures, float init_temp, int start, int end, int num_cities, int path_length, int num_simulations) { hipLaunchKernelGGL(( cudaInitializeDataKernel), 
dim3(blocks), dim3(threadsPerBlock), 0, 0, path, dist_matrix, states, energies, temperatures, init_temp, start, end, num_cities, path_length, num_simulations); }
01bf13bece0dcbec0a0a36c41340761a1c0ad438.cu
#include <cmath> #include <cuda_runtime.h> #include <curand_kernel.h> #include "metropolis_cuda.cuh" /* Raandomly shuffle the path */ __device__ void shuffle(int *path, curandState localState, int path_length) { for (int i = 1; i < (path_length - 1); i++) { int j = int(curand_uniform(&localState) * float(path_length - 3.0) + 1.0); int temp = path[j]; path[j] = path[i]; path[i] = temp; } } /* Calculate the energy of a given path with the given distance/cost matrix */ __device__ float calculate_energy(int *path, float *dist_matrix, int num_cities, int path_length) { float E = 0.0; for (int i = 0; i < (path_length - 1); i++) { E += dist_matrix[path[i] * num_cities + path[i+1]]; } return E; } /* Given a path, distance/cost matrix, the old energy of this path, the two indices between which all cities will be reversed, the number of cities, and the path_length, compute the new energy if we were to perform the reversal. */ __device__ float update_energy(int *path, float *dist_matrix, float E_old, int idx1, int idx2, int num_cities, int path_length) { float E_new = E_old; if (idx1 != idx2) { int start_city = path[idx1]; int end_city = path[idx2]; E_new -= dist_matrix[path[idx1-1] * num_cities + start_city]; E_new -= dist_matrix[end_city * num_cities + path[idx2+1]]; E_new += dist_matrix[path[idx1-1] * num_cities + path[idx2]]; E_new += dist_matrix[path[idx1] * num_cities + path[idx2+1]]; } return E_new; } __device__ float calc_pi_ratio(float E_old, float E_new, float temp) { return exp(-1.0 / temp * (E_new - E_old)); } /* Arguments: path is an integer array of size num_simulations * num_cities, that contains the paths for each of the simulations we are running dist_matrix is a float array of size num_cities * num_cities, that contains all of the distance information between cities states is a curandState array of size num_simulations that contains the states of all of the PRNGs energies is a float array of size num_simulations that contains the energy/cost values for all of our simulations temperatures is a float array of size num_simulations that contains the current temperature for each simulation num_cities is how many cities we have path_length is the length of our path num_simulations is how many simulations we are running This kernel performs one iteration of the simulated annealing algorithm for each of our simulations. 
*/ __global__ void cudaParallelMetropolisKernel(int *path, float *dist_matrix, curandState *states, float *energies, float *temperatures, int num_cities, int path_length, int num_simulations) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; while (index < num_simulations) { float E_old = energies[index]; float temp = temperatures[index]; int *path_new = path + index * path_length; // Figure out which two indices in the path are going to be reversed int idx1 = int(curand_uniform(&states[index]) * float(path_length - 3.0) + 1.0); int idx2 = int(curand_uniform(&states[index]) * float(path_length - 3.0) + 1.0); int a = min(idx1, idx2); int b = max(idx1, idx2); // Calculate energy of new path with hypothetical reversal float E_new = update_energy(path_new, dist_matrix, E_old, a, b, num_cities, path_length); // Calculate energy ratio of old & new path float check = min(float(1.0), calc_pi_ratio(E_old, E_new, temp)); // Generate random number to see if we accept float u = curand_uniform(&states[index]); if (u < check) { // If accept, change temperature & energy & actually change path temperatures[index] = 0.9999 * temp; energies[index] = E_new; // Reverse portion of path int t; while (a < b) { t = path_new[a]; path_new[a] = path_new[b]; path_new[b] = t; a++; b--; } } index += blockDim.x * gridDim.x; } } /* Given a start and end index, initializes a random path */ __device__ void initializePath(int *path, int start, int end, curandState localState, int path_length, int num_simulations) { // Initialize path to just sequence of cities with given start & end path[0] = start; int actual_counter = 1; for (int i = 0; i < path_length; i++) { if (i != start && i != end) { path[actual_counter] = i; actual_counter++; } } path[path_length - 1] = end; // Perform shuffles of path to initialize a random path for (int i = 0; i < 50; i++) { shuffle(path, localState, path_length); } } // Initializes random initial paths, energies, temperatures, and curand states __global__ void cudaInitializeDataKernel(int *path, float *dist_matrix, curandState *states, float *energies, float *temperatures, float init_temp, int start, int end, int num_cities, int path_length, int num_simulations) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; while (index < num_simulations) { // Initialize curand state for this index curand_init(index, 0, 0, &states[index]); // Initialize random path initializePath(path + index * path_length, start, end, states[index], path_length, num_simulations); // Intiialize energy associated with path energies[index] = calculate_energy(path + index * path_length, dist_matrix, num_cities, path_length); // Initialize annealing temperature temperatures[index] = init_temp; index += blockDim.x * gridDim.x; } } void cudaCallParallelMetropolisKernel(const unsigned int blocks, const unsigned int threadsPerBlock, int *path, float *dist_matrix, curandState *states, float *energies, float *temperatures, int num_cities, int path_length, int num_simulations) { cudaParallelMetropolisKernel<<<blocks, threadsPerBlock>>> (path, dist_matrix, states, energies, temperatures, num_cities, path_length, num_simulations); } void cudaCallInitializeDataKernel(const unsigned int blocks, const unsigned int threadsPerBlock, int *path, float *dist_matrix, curandState *states, float *energies, float *temperatures, float init_temp, int start, int end, int num_cities, int path_length, int num_simulations) { cudaInitializeDataKernel<<<blocks, threadsPerBlock>>> (path, dist_matrix, states, energies, temperatures, 
init_temp, start, end, num_cities, path_length, num_simulations); }
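The two cudaCall* wrappers defined above are the whole device-side API of this file; the host code that drives them is not part of this record. A minimal driver might look like the sketch below (the iteration count, initial temperature, block/thread counts and path_length == num_cities are placeholder assumptions, the distance-matrix fill is elided, and it assumes metropolis_cuda.cuh declares the two wrappers):

// Host-side sketch for driving the annealing kernels above (assumptions noted in the lead-in).
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include "metropolis_cuda.cuh"

int main() {
    const int num_cities = 64, num_simulations = 1024;
    const int path_length = num_cities;              // assumed: every city appears once per path
    const unsigned int blocks = 32, threadsPerBlock = 128;

    int *path; float *dist_matrix, *energies, *temperatures; curandState *states;
    cudaMalloc(&path, num_simulations * path_length * sizeof(int));
    cudaMalloc(&dist_matrix, num_cities * num_cities * sizeof(float));
    cudaMalloc(&states, num_simulations * sizeof(curandState));
    cudaMalloc(&energies, num_simulations * sizeof(float));
    cudaMalloc(&temperatures, num_simulations * sizeof(float));

    // ... copy a host-side num_cities x num_cities cost matrix into dist_matrix ...

    cudaCallInitializeDataKernel(blocks, threadsPerBlock, path, dist_matrix, states,
        energies, temperatures, /*init_temp=*/10.0f, /*start=*/0, /*end=*/num_cities - 1,
        num_cities, path_length, num_simulations);

    for (int it = 0; it < 100000; ++it)              // one Metropolis step per launch
        cudaCallParallelMetropolisKernel(blocks, threadsPerBlock, path, dist_matrix,
            states, energies, temperatures, num_cities, path_length, num_simulations);
    cudaDeviceSynchronize();

    // ... copy energies (and the matching rows of path) back and keep the best simulation ...

    cudaFree(path); cudaFree(dist_matrix); cudaFree(states);
    cudaFree(energies); cudaFree(temperatures);
    return 0;
}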
66dbab63f48dd39ca2636aa18040a2fe73b39686.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <limits> #include <algorithm> using namespace std; #define BLOCK_SIZE 512 __global__ void reduce_max(float * in, float * out, int numel, float smallest) { //@@ Load a segment of the input vector into shared memory __shared__ float s[2 * BLOCK_SIZE]; unsigned int t = threadIdx.x; unsigned int start = 2 * blockIdx.x * BLOCK_SIZE; if (start + t < numel) s[t] = in[start + t]; else s[t] = smallest; if (start + BLOCK_SIZE + t < numel) s[BLOCK_SIZE + t] = in[start + BLOCK_SIZE + t]; else s[BLOCK_SIZE + t] = smallest; //@@ Traverse the reduction tree for (unsigned int stride = BLOCK_SIZE; stride >= 1; stride >>= 1) { __syncthreads(); if (t < stride) s[t] = fmax(s[t], s[t+stride]); } //@@ Write the computed sum of the block to the output vector at the //@@ correct index if (t == 0) out[blockIdx.x] = s[0]; } void fillarray(float* in, int size){ srand(3); for (int i = 0; i < size; i++){ // in[i] = i+1; // in[i] = rand()%100-50; in[i] = rand()%100; //TODO put randomizer here } } void fillTest(float* in, int size){ // in[0]=1.0; // in[1]=1.0; // in[2]=2.0; // in[3]=3.0; // in[4]=4.0; // in[5]=5.0; // in[6]=5.0; // in[7]=6.0; // in[8]=6.0; // in[9]=6.0; // in[10]=3.0; // in[11]=3.0; // in[12]=4.0; // in[13]=4.0; // in[14]=5.0; // in[15]=5.0; for (int i = 0; i < size; i++){ in[i] = i; } } int main(int argc, char ** argv) { int ii; // wbArg_t args; float * hostInput; // The input 1D list float * hostOutput; // The output list float * deviceInput; float * deviceOutput; int numInputElements; // number of elements in the input list int numOutputElements; // number of elements in the output list float smallest = numeric_limits<float>::min(); if (argc != 2){ printf("wrong arguments\n"); return 0; } numInputElements = atoi(argv[1]); hostInput = (float*) malloc(numInputElements * sizeof(float)); fillTest (hostInput, numInputElements); numOutputElements = numInputElements / (BLOCK_SIZE<<1); if (numInputElements % (BLOCK_SIZE<<1)) { numOutputElements++; } hostOutput = (float*) malloc(numOutputElements * sizeof(float)); hipMalloc(&deviceInput, sizeof(float) * numInputElements); hipMalloc(&deviceOutput, sizeof(float) * numOutputElements); hipMemcpy(deviceInput, hostInput, sizeof(float) * numInputElements, hipMemcpyHostToDevice); dim3 dimGrid(numOutputElements, 1, 1); dim3 dimBlock(BLOCK_SIZE, 1, 1); hipLaunchKernelGGL(( reduce_max), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceInput, deviceOutput, numInputElements, smallest); hipDeviceSynchronize(); hipMemcpy(hostOutput, deviceOutput, sizeof(float) * numOutputElements, hipMemcpyDeviceToHost); for (ii = 1; ii < numOutputElements; ii++) { hostOutput[0] = max(hostOutput[ii], hostOutput[0]); } printf("Final max %f\n", hostOutput[0]); hipFree(deviceInput); hipFree(deviceOutput); free(hostInput); free(hostOutput); }
66dbab63f48dd39ca2636aa18040a2fe73b39686.cu
#include <stdlib.h> #include <stdio.h> #include <limits> #include <algorithm> using namespace std; #define BLOCK_SIZE 512 __global__ void reduce_max(float * in, float * out, int numel, float smallest) { //@@ Load a segment of the input vector into shared memory __shared__ float s[2 * BLOCK_SIZE]; unsigned int t = threadIdx.x; unsigned int start = 2 * blockIdx.x * BLOCK_SIZE; if (start + t < numel) s[t] = in[start + t]; else s[t] = smallest; if (start + BLOCK_SIZE + t < numel) s[BLOCK_SIZE + t] = in[start + BLOCK_SIZE + t]; else s[BLOCK_SIZE + t] = smallest; //@@ Traverse the reduction tree for (unsigned int stride = BLOCK_SIZE; stride >= 1; stride >>= 1) { __syncthreads(); if (t < stride) s[t] = fmax(s[t], s[t+stride]); } //@@ Write the computed sum of the block to the output vector at the //@@ correct index if (t == 0) out[blockIdx.x] = s[0]; } void fillarray(float* in, int size){ srand(3); for (int i = 0; i < size; i++){ // in[i] = i+1; // in[i] = rand()%100-50; in[i] = rand()%100; //TODO put randomizer here } } void fillTest(float* in, int size){ // in[0]=1.0; // in[1]=1.0; // in[2]=2.0; // in[3]=3.0; // in[4]=4.0; // in[5]=5.0; // in[6]=5.0; // in[7]=6.0; // in[8]=6.0; // in[9]=6.0; // in[10]=3.0; // in[11]=3.0; // in[12]=4.0; // in[13]=4.0; // in[14]=5.0; // in[15]=5.0; for (int i = 0; i < size; i++){ in[i] = i; } } int main(int argc, char ** argv) { int ii; // wbArg_t args; float * hostInput; // The input 1D list float * hostOutput; // The output list float * deviceInput; float * deviceOutput; int numInputElements; // number of elements in the input list int numOutputElements; // number of elements in the output list float smallest = numeric_limits<float>::min(); if (argc != 2){ printf("wrong arguments\n"); return 0; } numInputElements = atoi(argv[1]); hostInput = (float*) malloc(numInputElements * sizeof(float)); fillTest (hostInput, numInputElements); numOutputElements = numInputElements / (BLOCK_SIZE<<1); if (numInputElements % (BLOCK_SIZE<<1)) { numOutputElements++; } hostOutput = (float*) malloc(numOutputElements * sizeof(float)); cudaMalloc(&deviceInput, sizeof(float) * numInputElements); cudaMalloc(&deviceOutput, sizeof(float) * numOutputElements); cudaMemcpy(deviceInput, hostInput, sizeof(float) * numInputElements, cudaMemcpyHostToDevice); dim3 dimGrid(numOutputElements, 1, 1); dim3 dimBlock(BLOCK_SIZE, 1, 1); reduce_max<<<dimGrid, dimBlock>>>(deviceInput, deviceOutput, numInputElements, smallest); cudaDeviceSynchronize(); cudaMemcpy(hostOutput, deviceOutput, sizeof(float) * numOutputElements, cudaMemcpyDeviceToHost); for (ii = 1; ii < numOutputElements; ii++) { hostOutput[0] = max(hostOutput[ii], hostOutput[0]); } printf("Final max %f\n", hostOutput[0]); cudaFree(deviceInput); cudaFree(deviceOutput); free(hostInput); free(hostOutput); }
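The padding value handed to reduce_max in both versions of this file is numeric_limits<float>::min(), which is the smallest positive normal float (about 1.18e-38), not the most negative representable value; the non-negative fillTest data hides this, but negative inputs would be reduced incorrectly. The identity element of a float max-reduction is lowest():

#include <limits>
#include <cfloat>

// Identity element for a max-reduction over arbitrary floats (sketch):
// lowest() (equivalently -FLT_MAX) rather than min(), which is a tiny positive number.
float smallest = std::numeric_limits<float>::lowest();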
03bb8d89f0b76eb839736f62731b79128c9e55f4.hip
// !!! This is a file automatically generated by hipify!!! #include <rocblas.h> #include <hip/hip_runtime.h> #include <hipcub/hipcub.hpp> #include "blas.hpp" #include "../device_context/cuda_context.hpp" namespace mlfe{ namespace math{ template<> void gemm<float, CUDAContext>( const bool trans_a, const bool trans_b, const int m, const int n, const int k, const float alpha, const float *a_ptr, const int lda, const float *b_ptr, const int ldb, const float beta, float *c_ptr, const int ldc, CUDAContext *context ){ hipblasOperation_t cuTransA = !trans_a ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = !trans_b ? HIPBLAS_OP_N : HIPBLAS_OP_T; if (hipblasSgemm(context->GetHandler(), cuTransB, cuTransA, n, m, k, &alpha, b_ptr, (!trans_b) ? n : k, a_ptr, (!trans_a) ? k : m, &beta, c_ptr, n) != hipSuccess) { throw std::string("gemm<float, CUDAContext> : hipblasSgemm failed."); } } template<> void gemm<double, CUDAContext>( const bool trans_a, const bool trans_b, const int m, const int n, const int k, const double alpha, const double *a_ptr, const int lda, const double *b_ptr, const int ldb, const double beta, double *c_ptr, const int ldc, CUDAContext *context ){ hipblasOperation_t cuTransA = !trans_a ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = !trans_b ? HIPBLAS_OP_N : HIPBLAS_OP_T; if (hipblasDgemm(context->GetHandler(), cuTransB, cuTransA, n, m, k, &alpha, b_ptr, (!trans_b) ? n : k, a_ptr, (!trans_a) ? k : m, &beta, c_ptr, n) != hipSuccess) { throw std::string("gemm<float, CUDAContext> : hipblasDgemm failed."); } } template <> void gemv<float, CUDAContext>( const bool trans_a, const int m, const int n, const float alpha, const float *a_ptr, const int lda, const float *b_ptr, const float beta, float *c_ptr, const int ldc, CUDAContext *context ){ hipblasOperation_t cuTransA = (!trans_a) ? HIPBLAS_OP_T : HIPBLAS_OP_N; if (hipblasSgemv( context->GetHandler(), cuTransA, n, m, &alpha, a_ptr, n, b_ptr, 1, &beta, c_ptr, 1) != hipSuccess) { throw std::string("gemv<float, CUDAContext> : hipblasSgemv failed."); } } template <> void gemv<double, CUDAContext>( const bool trans_a, const int m, const int n, const double alpha, const double *a_ptr, const int lda, const double *b_ptr, const double beta, double *c_ptr, const int ldc, CUDAContext *context ){ hipblasOperation_t cuTransA = (!trans_a) ? 
HIPBLAS_OP_T : HIPBLAS_OP_N; if (hipblasDgemv( context->GetHandler(), cuTransA, n, m, &alpha, a_ptr, n, b_ptr, 1, &beta, c_ptr, 1) != hipSuccess) { throw std::string("gemv<float, CUDAContext> : hipblasDgemv failed."); } } template <class DataType> __global__ void rowwise_max_kernel( const int rows, const int cols, const DataType *data, DataType *out ){ typedef hipcub::BlockReduce<float, CUDA_CONTEXT_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int rowIndex = blockIdx.x; rowIndex < rows; rowIndex += gridDim.x) { DataType maxval = static_cast<DataType>(-FLT_MAX); for (int colIndex = threadIdx.x; colIndex < cols; colIndex += blockDim.x) { maxval = max(data[rowIndex * cols + colIndex], maxval); } maxval = BlockReduce(temp_storage).Reduce(maxval, hipcub::Max()); if (threadIdx.x == 0) { out[rowIndex] = maxval; } __syncthreads(); } } template <> void rowwise_max<float, CUDAContext>( const int m, const int n, const float *a_ptr, float *b_ptr ){ hipLaunchKernelGGL(( rowwise_max_kernel<float>), dim3(std::min<int>(n, CUDA_CONTEXT_MAXIMUM_NUM_BLOCKS)), dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, m, n, a_ptr, b_ptr); } template <> void rowwise_max<double, CUDAContext>( const int m, const int n, const double *a_ptr, double *b_ptr ){ hipLaunchKernelGGL(( rowwise_max_kernel<double>), dim3(std::min<int>(n, CUDA_CONTEXT_MAXIMUM_NUM_BLOCKS)), dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, m, n, a_ptr, b_ptr); } template <> void rowwise_normalize<float, CUDAContext>( const int m, const int n, const float *scaler_ptr, float *norm_dest ){ } template <> void rowwise_normalize<double, CUDAContext>( const int m, const int n, const double *scaler_ptr, double *norm_dest ){ } template <class DataType> __global__ void ProbCrossEntropyKernel( const int N, const int D, const DataType *Pdata, const DataType *labeldata, DataType* Ydata ){ typedef hipcub::BlockReduce<float, CUDA_CONTEXT_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int i = blockIdx.x; i < N; i += gridDim.x) { DataType sum = static_cast<DataType>(0); DataType total_prob = static_cast<DataType>(0); for (int j = threadIdx.x; j < D; j += blockDim.x) { int idx = i * D + j; total_prob += labeldata[idx]; sum += -log(max(Pdata[idx], static_cast<DataType>(FLT_MIN)) ) * labeldata[idx]; } DataType tot = BlockReduce(temp_storage).Sum(sum); __syncthreads(); DataType total_prob_sum = BlockReduce(temp_storage).Sum(total_prob); if (threadIdx.x == 0) { Ydata[i] = tot; // Sanity check //CUDA_KERNEL_ASSERT(abs(1.0 - total_prob_sum) < 1e-5f); } __syncthreads(); } } template <> void cross_entropy<float, CUDAContext>( const int m, const int n, const float *prob_ptr, const float *label_ptr, float *loss_ptr ){ hipLaunchKernelGGL(( ProbCrossEntropyKernel<float>), dim3(CUDA_CONTEXT_GET_BLOCKS(m * n)), dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, m, n, prob_ptr, label_ptr, loss_ptr); } template <> void cross_entropy<double, CUDAContext>( const int m, const int n, const double *prob_ptr, const double *label_ptr, double *loss_ptr ){ hipLaunchKernelGGL(( ProbCrossEntropyKernel<double>), dim3(CUDA_CONTEXT_GET_BLOCKS(m * n)), dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, m, n, prob_ptr, label_ptr, loss_ptr); } template <class DataType> __global__ void CrossEntropyGradientKernel( const int N, const int D, const DataType* Pdata, const DataType* labeldata, DataType* dXdata ){ CUDA_1D_KERNEL_LOOP(idx, N * D){ dXdata[idx] = Pdata[idx] - labeldata[idx]; } } template <> void cross_entropy_gradients<float, CUDAContext>( const int m, 
const int n, const float *prob_ptr, const float *label_ptr, float *dx_ptr ){ hipLaunchKernelGGL(( CrossEntropyGradientKernel<float>), dim3(CUDA_CONTEXT_GET_BLOCKS(m * n)), dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, m, n, prob_ptr, label_ptr, dx_ptr); } template <> void cross_entropy_gradients<double, CUDAContext>( const int m, const int n, const double *prob_ptr, const double *label_ptr, double *dx_ptr ){ hipLaunchKernelGGL(( CrossEntropyGradientKernel<double>), dim3(CUDA_CONTEXT_GET_BLOCKS(m * n)), dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, m, n, prob_ptr, label_ptr, dx_ptr); } template <class DataType> __global__ void exp_kernel( const int size, const DataType *x, DataType *y ){ CUDA_1D_KERNEL_LOOP(index, size) { y[index] = ::exp(x[index]); } } template<> void exp<float, CUDAContext>( const int size, const float *x_ptr, float *y_ptr){ hipLaunchKernelGGL(( exp_kernel<float>), dim3(CUDA_CONTEXT_GET_BLOCKS(size)), dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, size, x_ptr, y_ptr); } template<> void exp<double, CUDAContext>( const int size, const double *x_ptr, double *y_ptr){ hipLaunchKernelGGL(( exp_kernel<double>), dim3(CUDA_CONTEXT_GET_BLOCKS(size)), dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, size, x_ptr, y_ptr); } template <class DataType> __global__ void axpy_kernel( const int size, const DataType a, const DataType *x, DataType *y ){ CUDA_1D_KERNEL_LOOP(index, size) { y[index] = a * x[index] + y[index]; } } template<> void axpy<float, CUDAContext>( int size, const float alpha, const float *x_ptr, float *y_ptr ){ hipLaunchKernelGGL(( axpy_kernel<float>), dim3(CUDA_CONTEXT_GET_BLOCKS(size)), dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, size, alpha, x_ptr, y_ptr); } template<> void axpy<double, CUDAContext>( int size, const double alpha, const double *x_ptr, double *y_ptr ){ hipLaunchKernelGGL(( axpy_kernel<double>), dim3(CUDA_CONTEXT_GET_BLOCKS(size)), dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, size, alpha, x_ptr, y_ptr); } template <class DataType> __global__ void scale_kernel( const int size, const DataType *x, const DataType a, DataType *y ){ CUDA_1D_KERNEL_LOOP(index, size) { y[index] = a * x[index]; } } template <> void scal<float, CUDAContext>( const int size, const float alpha, const float *x_ptr, float *y_ptr ){ hipLaunchKernelGGL(( scale_kernel<float>), dim3(CUDA_CONTEXT_GET_BLOCKS(size)), dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, size, x_ptr, alpha, y_ptr); } template <> void scal<double, CUDAContext>( const int size, const double alpha, const double *x_ptr, double *y_ptr ){ hipLaunchKernelGGL(( scale_kernel<double>), dim3(CUDA_CONTEXT_GET_BLOCKS(size)), dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, size, x_ptr, alpha, y_ptr); } template <typename DataType> __global__ void SumKernel( const int N, const DataType* X, DataType* Y ){ const int idx = threadIdx.x; __shared__ float reduction_buffer[128]; reduction_buffer[idx] = 0; // A multilevel reduction. 
// N -> 128 for (int i = idx; i < N; i += 128) { reduction_buffer[idx] += static_cast<float>(X[i]); } __syncthreads(); // 128 -> 32 if (idx < 32) { reduction_buffer[idx] += reduction_buffer[idx + 32] + reduction_buffer[idx + 64] + reduction_buffer[idx + 96]; } __syncthreads(); // 32 -> 1 if (idx == 0) { float tmp = 0; for (int i = 0; i < 32; ++i) { tmp += reduction_buffer[i]; } *Y = static_cast<DataType>(tmp); } } template <> void sum<float, CUDAContext>( const int size, const float *x_ptr, float *y_ptr){ hipLaunchKernelGGL(( SumKernel<float>), dim3(1), dim3(128), 0, 0, size, x_ptr, y_ptr); } template <> void sum<double, CUDAContext>( const int size, const double *x_ptr, double *y_ptr){ hipLaunchKernelGGL(( SumKernel<double>), dim3(1), dim3(128), 0, 0, size, x_ptr, y_ptr); } } /* math */ } /* mlfe */
03bb8d89f0b76eb839736f62731b79128c9e55f4.cu
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cub/block/block_reduce.cuh>
#include <algorithm>   // std::min
#include <cfloat>      // FLT_MAX / FLT_MIN
#include <string>      // std::string thrown on cuBLAS failure
#include "blas.hpp"
#include "../device_context/cuda_context.hpp"

namespace mlfe{ namespace math{

template<>
void gemm<float, CUDAContext>(const bool trans_a, const bool trans_b,
                              const int m, const int n, const int k,
                              const float alpha, const float *a_ptr, const int lda,
                              const float *b_ptr, const int ldb, const float beta,
                              float *c_ptr, const int ldc, CUDAContext *context){
    cublasOperation_t cuTransA = !trans_a ? CUBLAS_OP_N : CUBLAS_OP_T;
    cublasOperation_t cuTransB = !trans_b ? CUBLAS_OP_N : CUBLAS_OP_T;
    // cuBLAS is column-major: compute C^T = B^T * A^T by swapping the operands
    // and passing the dimensions as (n, m, k), which yields row-major C = A * B.
    if (cublasSgemm(context->GetHandler(),
        cuTransB, cuTransA,
        n, m, k,
        &alpha, b_ptr, (!trans_b) ? n : k,
        a_ptr, (!trans_a) ? k : m,
        &beta, c_ptr, n) != CUBLAS_STATUS_SUCCESS) {
        throw std::string("gemm<float, CUDAContext> : cublasSgemm failed.");
    }
}

template<>
void gemm<double, CUDAContext>(const bool trans_a, const bool trans_b,
                               const int m, const int n, const int k,
                               const double alpha, const double *a_ptr, const int lda,
                               const double *b_ptr, const int ldb, const double beta,
                               double *c_ptr, const int ldc, CUDAContext *context){
    cublasOperation_t cuTransA = !trans_a ? CUBLAS_OP_N : CUBLAS_OP_T;
    cublasOperation_t cuTransB = !trans_b ? CUBLAS_OP_N : CUBLAS_OP_T;
    if (cublasDgemm(context->GetHandler(),
        cuTransB, cuTransA,
        n, m, k,
        &alpha, b_ptr, (!trans_b) ? n : k,
        a_ptr, (!trans_a) ? k : m,
        &beta, c_ptr, n) != CUBLAS_STATUS_SUCCESS) {
        throw std::string("gemm<double, CUDAContext> : cublasDgemm failed.");
    }
}

template <>
void gemv<float, CUDAContext>(const bool trans_a, const int m, const int n,
                              const float alpha, const float *a_ptr, const int lda,
                              const float *b_ptr, const float beta,
                              float *c_ptr, const int ldc, CUDAContext *context){
    cublasOperation_t cuTransA = (!trans_a) ? CUBLAS_OP_T : CUBLAS_OP_N;
    if (cublasSgemv(context->GetHandler(), cuTransA, n, m,
        &alpha, a_ptr, n, b_ptr, 1,
        &beta, c_ptr, 1) != CUBLAS_STATUS_SUCCESS) {
        throw std::string("gemv<float, CUDAContext> : cublasSgemv failed.");
    }
}

template <>
void gemv<double, CUDAContext>(const bool trans_a, const int m, const int n,
                               const double alpha, const double *a_ptr, const int lda,
                               const double *b_ptr, const double beta,
                               double *c_ptr, const int ldc, CUDAContext *context){
    cublasOperation_t cuTransA = (!trans_a) ? CUBLAS_OP_T : CUBLAS_OP_N;
    if (cublasDgemv(context->GetHandler(), cuTransA, n, m,
        &alpha, a_ptr, n, b_ptr, 1,
        &beta, c_ptr, 1) != CUBLAS_STATUS_SUCCESS) {
        throw std::string("gemv<double, CUDAContext> : cublasDgemv failed.");
    }
}

template <class DataType>
__global__ void rowwise_max_kernel(const int rows, const int cols,
                                   const DataType *data, DataType *out){
    typedef cub::BlockReduce<float, CUDA_CONTEXT_NUM_THREADS> BlockReduce;
    __shared__ typename BlockReduce::TempStorage temp_storage;
    for (int rowIndex = blockIdx.x; rowIndex < rows; rowIndex += gridDim.x) {
        DataType maxval = static_cast<DataType>(-FLT_MAX);
        for (int colIndex = threadIdx.x; colIndex < cols; colIndex += blockDim.x) {
            maxval = max(data[rowIndex * cols + colIndex], maxval);
        }
        maxval = BlockReduce(temp_storage).Reduce(maxval, cub::Max());
        if (threadIdx.x == 0) {
            out[rowIndex] = maxval;
        }
        __syncthreads();
    }
}

template <>
void rowwise_max<float, CUDAContext>(const int m, const int n,
                                     const float *a_ptr, float *b_ptr){
    // one block per row (m rows, grid-strided), capped at the maximum grid size
    rowwise_max_kernel<float><<<
        std::min<int>(m, CUDA_CONTEXT_MAXIMUM_NUM_BLOCKS),
        CUDA_CONTEXT_NUM_THREADS>>>(m, n, a_ptr, b_ptr);
}

template <>
void rowwise_max<double, CUDAContext>(const int m, const int n,
                                      const double *a_ptr, double *b_ptr){
    rowwise_max_kernel<double><<<
        std::min<int>(m, CUDA_CONTEXT_MAXIMUM_NUM_BLOCKS),
        CUDA_CONTEXT_NUM_THREADS>>>(m, n, a_ptr, b_ptr);
}

template <>
void rowwise_normalize<float, CUDAContext>(const int m, const int n,
                                           const float *scaler_ptr, float *norm_dest){
}

template <>
void rowwise_normalize<double, CUDAContext>(const int m, const int n,
                                            const double *scaler_ptr, double *norm_dest){
}

template <class DataType>
__global__ void ProbCrossEntropyKernel(const int N, const int D,
                                       const DataType *Pdata, const DataType *labeldata,
                                       DataType* Ydata){
    typedef cub::BlockReduce<float, CUDA_CONTEXT_NUM_THREADS> BlockReduce;
    __shared__ typename BlockReduce::TempStorage temp_storage;
    for (int i = blockIdx.x; i < N; i += gridDim.x) {
        DataType sum = static_cast<DataType>(0);
        DataType total_prob = static_cast<DataType>(0);
        for (int j = threadIdx.x; j < D; j += blockDim.x) {
            int idx = i * D + j;
            total_prob += labeldata[idx];
            sum += -log(max(Pdata[idx], static_cast<DataType>(FLT_MIN))) * labeldata[idx];
        }
        DataType tot = BlockReduce(temp_storage).Sum(sum);
        __syncthreads();
        DataType total_prob_sum = BlockReduce(temp_storage).Sum(total_prob);
        if (threadIdx.x == 0) {
            Ydata[i] = tot;
            // Sanity check
            //CUDA_KERNEL_ASSERT(abs(1.0 - total_prob_sum) < 1e-5f);
        }
        __syncthreads();
    }
}

template <>
void cross_entropy<float, CUDAContext>(const int m, const int n,
                                       const float *prob_ptr, const float *label_ptr,
                                       float *loss_ptr){
    ProbCrossEntropyKernel<float><<<
        CUDA_CONTEXT_GET_BLOCKS(m * n),
        CUDA_CONTEXT_NUM_THREADS>>>(m, n, prob_ptr, label_ptr, loss_ptr);
}

template <>
void cross_entropy<double, CUDAContext>(const int m, const int n,
                                        const double *prob_ptr, const double *label_ptr,
                                        double *loss_ptr){
    ProbCrossEntropyKernel<double><<<
        CUDA_CONTEXT_GET_BLOCKS(m * n),
        CUDA_CONTEXT_NUM_THREADS>>>(m, n, prob_ptr, label_ptr, loss_ptr);
}

template <class DataType>
__global__ void CrossEntropyGradientKernel(const int N, const int D,
                                           const DataType* Pdata, const DataType* labeldata,
                                           DataType* dXdata){
    CUDA_1D_KERNEL_LOOP(idx, N * D){
        dXdata[idx] = Pdata[idx] - labeldata[idx];
    }
}

template <>
void cross_entropy_gradients<float, CUDAContext>(const int m, const int n,
                                                 const float *prob_ptr, const float *label_ptr,
                                                 float *dx_ptr){
    CrossEntropyGradientKernel<float><<<
        CUDA_CONTEXT_GET_BLOCKS(m * n),
        CUDA_CONTEXT_NUM_THREADS>>>(m, n, prob_ptr, label_ptr, dx_ptr);
}

template <>
void cross_entropy_gradients<double, CUDAContext>(const int m, const int n,
                                                  const double *prob_ptr, const double *label_ptr,
                                                  double *dx_ptr){
    CrossEntropyGradientKernel<double><<<
        CUDA_CONTEXT_GET_BLOCKS(m * n),
        CUDA_CONTEXT_NUM_THREADS>>>(m, n, prob_ptr, label_ptr, dx_ptr);
}

template <class DataType>
__global__ void exp_kernel(const int size, const DataType *x, DataType *y){
    CUDA_1D_KERNEL_LOOP(index, size) {
        // use the global CUDA device overload; std::exp is host-only and the
        // unqualified name would be hidden by mlfe::math::exp
        y[index] = ::exp(x[index]);
    }
}

template<>
void exp<float, CUDAContext>(const int size, const float *x_ptr, float *y_ptr){
    exp_kernel<float><<<CUDA_CONTEXT_GET_BLOCKS(size),
        CUDA_CONTEXT_NUM_THREADS>>>(size, x_ptr, y_ptr);
}

template<>
void exp<double, CUDAContext>(const int size, const double *x_ptr, double *y_ptr){
    exp_kernel<double><<<CUDA_CONTEXT_GET_BLOCKS(size),
        CUDA_CONTEXT_NUM_THREADS>>>(size, x_ptr, y_ptr);
}

template <class DataType>
__global__ void axpy_kernel(const int size, const DataType a,
                            const DataType *x, DataType *y){
    CUDA_1D_KERNEL_LOOP(index, size) {
        y[index] = a * x[index] + y[index];
    }
}

template<>
void axpy<float, CUDAContext>(int size, const float alpha,
                              const float *x_ptr, float *y_ptr){
    axpy_kernel<float><<<CUDA_CONTEXT_GET_BLOCKS(size),
        CUDA_CONTEXT_NUM_THREADS>>>(size, alpha, x_ptr, y_ptr);
}

template<>
void axpy<double, CUDAContext>(int size, const double alpha,
                               const double *x_ptr, double *y_ptr){
    axpy_kernel<double><<<CUDA_CONTEXT_GET_BLOCKS(size),
        CUDA_CONTEXT_NUM_THREADS>>>(size, alpha, x_ptr, y_ptr);
}

template <class DataType>
__global__ void scale_kernel(const int size, const DataType *x,
                             const DataType a, DataType *y){
    CUDA_1D_KERNEL_LOOP(index, size) {
        y[index] = a * x[index];
    }
}

template <>
void scal<float, CUDAContext>(const int size, const float alpha,
                              const float *x_ptr, float *y_ptr){
    scale_kernel<float><<<CUDA_CONTEXT_GET_BLOCKS(size),
        CUDA_CONTEXT_NUM_THREADS>>>(size, x_ptr, alpha, y_ptr);
}

template <>
void scal<double, CUDAContext>(const int size, const double alpha,
                               const double *x_ptr, double *y_ptr){
    scale_kernel<double><<<CUDA_CONTEXT_GET_BLOCKS(size),
        CUDA_CONTEXT_NUM_THREADS>>>(size, x_ptr, alpha, y_ptr);
}

template <typename DataType>
__global__ void SumKernel(const int N, const DataType* X, DataType* Y){
    const int idx = threadIdx.x;
    __shared__ float reduction_buffer[128];

    reduction_buffer[idx] = 0;

    // A multilevel reduction.
    // N -> 128
    for (int i = idx; i < N; i += 128) {
        reduction_buffer[idx] += static_cast<float>(X[i]);
    }
    __syncthreads();
    // 128 -> 32
    if (idx < 32) {
        reduction_buffer[idx] +=
            reduction_buffer[idx + 32] +
            reduction_buffer[idx + 64] +
            reduction_buffer[idx + 96];
    }
    __syncthreads();
    // 32 -> 1
    if (idx == 0) {
        float tmp = 0;
        for (int i = 0; i < 32; ++i) {
            tmp += reduction_buffer[i];
        }
        *Y = static_cast<DataType>(tmp);
    }
}

template <>
void sum<float, CUDAContext>(const int size, const float *x_ptr, float *y_ptr){
    SumKernel<float><<<1, 128>>>(size, x_ptr, y_ptr);
}

template <>
void sum<double, CUDAContext>(const int size, const double *x_ptr, double *y_ptr){
    SumKernel<double><<<1, 128>>>(size, x_ptr, y_ptr);
}

} /* math */ } /* mlfe */
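// The swapped operand order in the gemm wrappers above can look surprising.
// Below is a minimal standalone sketch (plain cuBLAS only; it does not use
// mlfe's CUDAContext, and the variable names are illustrative) of the same
// row-major trick: cuBLAS is column-major, so a row-major C = A * B is
// obtained by requesting C^T = B^T * A^T with the dimensions given as (n, m, k).
#include <cstdio>
#include <cublas_v2.h>
#include <cuda_runtime.h>

int main(){
    const int m = 2, n = 3, k = 2;          // row-major C (m x n) = A (m x k) * B (k x n)
    const float hA[m * k] = {1, 2,
                             3, 4};
    const float hB[k * n] = {1, 0, 1,
                             0, 1, 1};
    float *A, *B, *C;
    cudaMallocManaged(&A, sizeof(hA));
    cudaMallocManaged(&B, sizeof(hB));
    cudaMallocManaged(&C, m * n * sizeof(float));
    for (int i = 0; i < m * k; ++i) A[i] = hA[i];
    for (int i = 0; i < k * n; ++i) B[i] = hB[i];

    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.f, beta = 0.f;
    // same call shape as gemm<float, CUDAContext>: B before A, dims as (n, m, k)
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                n, m, k,
                &alpha, B, n, A, k,
                &beta, C, n);
    cudaDeviceSynchronize();

    for (int i = 0; i < m; ++i){            // expected rows: "1 2 3" and "3 4 7"
        for (int j = 0; j < n; ++j) printf("%g ", C[i * n + j]);
        printf("\n");
    }
    cublasDestroy(handle);
    cudaFree(A); cudaFree(B); cudaFree(C);
    return 0;
}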
fe9cf37349748f8210034cba0ba3fd03b224bfb4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//20M19118_CHENYING_CUDA
//module load cuda
//nvcc CUDA_code.cu -std=c++11
#include <cstdio>
#include <cmath>
#include <vector>
#include <chrono>
#include <stdlib.h>
using namespace std;

__global__ void matrix(int N, float *A, float *B, float* C){
  int i = blockIdx.x / N;
  int j = blockIdx.x % N;
  int k = threadIdx.x;
  atomicAdd(C+N*i+j, A[N*i+k] * B[N*k+j]);
}

int main(int argc, char** argv) {
  const int N = 256;
  float *A;
  float *B;
  float *C;
  hipMallocManaged(&A, N*N*sizeof(float));
  hipMallocManaged(&B, N*N*sizeof(float));
  hipMallocManaged(&C, N*N*sizeof(float));
  for (int i=0; i<N; i++) {
    for (int j=0; j<N; j++) {
      A[N*i+j] = drand48();
      B[N*i+j] = drand48();
    }
  }
  // managed memory is not guaranteed to be zero-initialized, and the kernel
  // only accumulates into C, so clear it before launching
  hipMemset(C, 0, N*N*sizeof(float));
  auto tic = chrono::steady_clock::now();
  hipLaunchKernelGGL(( matrix), dim3(N*N),dim3(N), 0, 0, N,A,B,C);
  hipDeviceSynchronize();
  auto toc = chrono::steady_clock::now();
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++)
      for (int k=0; k<N; k++)
        C[N*i+j] -= A[N*i+k] * B[N*k+j];
  double err = 0;
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++)
      err += fabs(C[N*i+j]);
  double time = chrono::duration<double>(toc-tic).count();
  printf("N : %d\n",N);
  printf("total: %lf s (%lf GFlops)\n",time,2.*N*N*N/time/1e9);
  printf("error: %lf\n",err/N/N);
}
fe9cf37349748f8210034cba0ba3fd03b224bfb4.cu
//20M19118_CHENYING_CUDA
//module load cuda
//nvcc CUDA_code.cu -std=c++11
#include <cstdio>
#include <cmath>
#include <vector>
#include <chrono>
#include <stdlib.h>
using namespace std;

__global__ void matrix(int N, float *A, float *B, float* C){
  int i = blockIdx.x / N;
  int j = blockIdx.x % N;
  int k = threadIdx.x;
  atomicAdd(C+N*i+j, A[N*i+k] * B[N*k+j]);
}

int main(int argc, char** argv) {
  const int N = 256;
  float *A;
  float *B;
  float *C;
  cudaMallocManaged(&A, N*N*sizeof(float));
  cudaMallocManaged(&B, N*N*sizeof(float));
  cudaMallocManaged(&C, N*N*sizeof(float));
  for (int i=0; i<N; i++) {
    for (int j=0; j<N; j++) {
      A[N*i+j] = drand48();
      B[N*i+j] = drand48();
    }
  }
  // managed memory is not guaranteed to be zero-initialized, and the kernel
  // only accumulates into C, so clear it before launching
  cudaMemset(C, 0, N*N*sizeof(float));
  auto tic = chrono::steady_clock::now();
  matrix<<<N*N,N>>>(N,A,B,C);
  cudaDeviceSynchronize();
  auto toc = chrono::steady_clock::now();
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++)
      for (int k=0; k<N; k++)
        C[N*i+j] -= A[N*i+k] * B[N*k+j];
  double err = 0;
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++)
      err += fabs(C[N*i+j]);
  double time = chrono::duration<double>(toc-tic).count();
  printf("N : %d\n",N);
  printf("total: %lf s (%lf GFlops)\n",time,2.*N*N*N/time/1e9);
  printf("error: %lf\n",err/N/N);
}
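// The kernel above dedicates one block to each C element and accumulates the
// partial products with atomicAdd. As a point of comparison -- a minimal
// sketch only, with a hypothetical tile size TS and N assumed to be a multiple
// of TS, not part of the submission above -- the usual shared-memory tiled
// formulation lets each thread own one C element and needs no atomics:
#include <cuda_runtime.h>

#define TS 16  // tile width (assumption; 256 % 16 == 0)

__global__ void matrix_tiled(int N, const float *A, const float *B, float *C) {
  __shared__ float As[TS][TS];
  __shared__ float Bs[TS][TS];
  int row = blockIdx.y * TS + threadIdx.y;
  int col = blockIdx.x * TS + threadIdx.x;
  float acc = 0.f;
  for (int t = 0; t < N; t += TS) {
    // stage one TSxTS tile of A and one of B in shared memory
    As[threadIdx.y][threadIdx.x] = A[row * N + (t + threadIdx.x)];
    Bs[threadIdx.y][threadIdx.x] = B[(t + threadIdx.y) * N + col];
    __syncthreads();
    for (int k = 0; k < TS; ++k)
      acc += As[threadIdx.y][k] * Bs[k][threadIdx.x];
    __syncthreads();
  }
  C[row * N + col] = acc;  // one exclusive writer per element
}

// possible launch with the managed A, B, C from the benchmark above:
//   dim3 grid(N / TS, N / TS), block(TS, TS);
//   matrix_tiled<<<grid, block>>>(N, A, B, C);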
bbf2ef95fab2df34586a991386d8dec2df0fc3ef.hip
// !!! This is a file automatically generated by hipify!!! // ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Summer Semester 2017, September 11 - October 9 // ### #include "helper.h" #include <iostream> #include "math.h" using namespace std; // uncomment to use the camera //#define CAMERA int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be initialized // This happens on the very first call to a CUDA function, and takes some time (around half a second) // We will do it right here, so that the run time measurements are accurate hipDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise #ifdef CAMERA #else // input image string image = ""; bool ret = getParam("i", image, argc, argv); if (!ret) cerr << "ERROR: no image specified" << endl; if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; } #endif // number of computation repetitions to get a better run time measurement int repeats = 1; getParam("repeats", repeats, argc, argv); cout << "repeats: " << repeats << endl; // load the input image as grayscale if "-gray" is specifed bool gray = false; getParam("gray", gray, argc, argv); cout << "gray: " << gray << endl; // ### Define your own parameters here as needed // Init camera / Load input image #ifdef CAMERA // Init camera cv::VideoCapture camera(0); if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; } int camW = 640; int camH = 480; camera.set(CV_CAP_PROP_FRAME_WIDTH,camW); camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH); // read in first frame to get the dimensions cv::Mat mIn; camera >> mIn; #else // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale)) cv::Mat mIn = cv::imread(image.c_str(), (gray? 
CV_LOAD_IMAGE_GRAYSCALE : -1)); // check if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; } #endif // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; // get image dimensions int w = mIn.cols; // width int h = mIn.rows; // height int nc = mIn.channels(); // number of channels cout << "image: " << w << " x " << h << endl; // Set the output image format // ### // ### // ### TODO: Change the output image format as needed // ### // ### cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers //cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers //cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer // ### Define your own output images here as needed if (nc == 1) { cv::Mat mOut(h,w,CV_32FC1); } else{cv::Mat mOut(h,w,CV_32FC3);} // Allocate arrays // input/output image width: w // input/output image height: h // input image number of channels: nc // output image number of channels: mOut.channels(), as defined above (nc, 3, or 1) // allocate raw input image array float *imgIn = new float[(size_t)w*h*nc]; // allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying) float *imgOut = new float[(size_t)w*h*mOut.channels()]; // For camera mode: Make a loop to read in camera frames #ifdef CAMERA // Read a camera image frame every 30 milliseconds: // cv::waitKey(30) waits 30 milliseconds for a keyboard input, // returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed while (cv::waitKey(30) < 0) { // Get camera image camera >> mIn; // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; #endif // Init raw input image array // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... // So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered (imgIn, mIn); int version = 1; Timer timer; int len = w*h*nc; //Exercise 6 //############################################# // show input image showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100) // show output image: first convert to interleaved opencv format from the layered raw array convert_layered_to_mat(mOut, imgOut); showImage("Output", mOut, 100+w+40, 100); // ### Display your own output images here as needed #ifdef CAMERA // end of camera loop } #else // wait for key inputs cv::waitKey(0); #endif // save input and result cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255] cv::imwrite("image_result.png",mOut*255.f); // free allocated arrays delete[] imgIn; delete[] imgOut; // close all opencv windows cvDestroyAllWindows(); return 0; }
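// The layout comments above (interleaved "rgb rgb rgb..." vs. layered
// "rrr... ggg... bbb...") are what convert_mat_to_layered from helper.h takes
// care of. A minimal host-side sketch of that kind of conversion (assumed
// indexing, not the course's exact helper code):
#include <cstddef>

// in : interleaved, in[(y*w + x)*nc + c]
// out: layered,     out[c*w*h + y*w + x]
void interleaved_to_layered(const float *in, float *out, int w, int h, int nc)
{
    for (int c = 0; c < nc; ++c)
        for (int y = 0; y < h; ++y)
            for (int x = 0; x < w; ++x)
                out[(size_t)c * w * h + (size_t)y * w + x] =
                    in[((size_t)y * w + x) * nc + c];
}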
bbf2ef95fab2df34586a991386d8dec2df0fc3ef.cu
// ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Summer Semester 2017, September 11 - October 9 // ### #include "helper.h" #include <iostream> #include "math.h" using namespace std; // uncomment to use the camera //#define CAMERA int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be initialized // This happens on the very first call to a CUDA function, and takes some time (around half a second) // We will do it right here, so that the run time measurements are accurate cudaDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise #ifdef CAMERA #else // input image string image = ""; bool ret = getParam("i", image, argc, argv); if (!ret) cerr << "ERROR: no image specified" << endl; if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; } #endif // number of computation repetitions to get a better run time measurement int repeats = 1; getParam("repeats", repeats, argc, argv); cout << "repeats: " << repeats << endl; // load the input image as grayscale if "-gray" is specifed bool gray = false; getParam("gray", gray, argc, argv); cout << "gray: " << gray << endl; // ### Define your own parameters here as needed // Init camera / Load input image #ifdef CAMERA // Init camera cv::VideoCapture camera(0); if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; } int camW = 640; int camH = 480; camera.set(CV_CAP_PROP_FRAME_WIDTH,camW); camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH); // read in first frame to get the dimensions cv::Mat mIn; camera >> mIn; #else // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale)) cv::Mat mIn = cv::imread(image.c_str(), (gray? 
CV_LOAD_IMAGE_GRAYSCALE : -1)); // check if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; } #endif // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; // get image dimensions int w = mIn.cols; // width int h = mIn.rows; // height int nc = mIn.channels(); // number of channels cout << "image: " << w << " x " << h << endl; // Set the output image format // ### // ### // ### TODO: Change the output image format as needed // ### // ### cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers //cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers //cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer // ### Define your own output images here as needed if (nc == 1) { cv::Mat mOut(h,w,CV_32FC1); } else{cv::Mat mOut(h,w,CV_32FC3);} // Allocate arrays // input/output image width: w // input/output image height: h // input image number of channels: nc // output image number of channels: mOut.channels(), as defined above (nc, 3, or 1) // allocate raw input image array float *imgIn = new float[(size_t)w*h*nc]; // allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying) float *imgOut = new float[(size_t)w*h*mOut.channels()]; // For camera mode: Make a loop to read in camera frames #ifdef CAMERA // Read a camera image frame every 30 milliseconds: // cv::waitKey(30) waits 30 milliseconds for a keyboard input, // returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed while (cv::waitKey(30) < 0) { // Get camera image camera >> mIn; // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; #endif // Init raw input image array // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... // So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered (imgIn, mIn); int version = 1; Timer timer; int len = w*h*nc; //Exercise 6 //############################################# // show input image showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100) // show output image: first convert to interleaved opencv format from the layered raw array convert_layered_to_mat(mOut, imgOut); showImage("Output", mOut, 100+w+40, 100); // ### Display your own output images here as needed #ifdef CAMERA // end of camera loop } #else // wait for key inputs cv::waitKey(0); #endif // save input and result cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255] cv::imwrite("image_result.png",mOut*255.f); // free allocated arrays delete[] imgIn; delete[] imgOut; // close all opencv windows cvDestroyAllWindows(); return 0; }
686c6325320cdea5878a22f5b04bb887c7797a9f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/concatenate.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/merge.cuh> #include <cudf/detail/sorting.hpp> #include <cudf/detail/tdigest/tdigest.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/tdigest/tdigest_column_view.cuh> #include <cudf/utilities/span.hpp> #include <cudf/lists/lists_column_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/iterator/discard_iterator.h> namespace cudf { namespace groupby { namespace detail { using namespace cudf::tdigest; namespace { // the most representative point within a cluster of similar // values. {mean, weight} // NOTE: Using a tuple here instead of a struct to take advantage of // thrust zip iterators for output. using centroid = thrust::tuple<double, double, bool>; // make a centroid from a scalar with a weight of 1. template <typename T> struct make_centroid { column_device_view const col; centroid operator() __device__(size_type index) { auto const is_valid = col.is_valid(index); auto const mean = is_valid ? static_cast<double>(col.element<T>(index)) : 0.0; auto const weight = is_valid ? 1.0 : 0.0; return {mean, weight, is_valid}; } }; // make a centroid from an input stream of mean/weight values. struct make_weighted_centroid { double const* mean; double const* weight; centroid operator() __device__(size_type index) { return {mean[index], weight[index], true}; } }; // merge two centroids struct merge_centroids { centroid operator() __device__(centroid const& lhs, centroid const& rhs) { bool const lhs_valid = thrust::get<2>(lhs); bool const rhs_valid = thrust::get<2>(rhs); if (!lhs_valid && !rhs_valid) { return {0, 0, false}; } if (!lhs_valid) { return rhs; } if (!rhs_valid) { return lhs; } double const lhs_mean = thrust::get<0>(lhs); double const rhs_mean = thrust::get<0>(rhs); double const lhs_weight = thrust::get<1>(lhs); double const rhs_weight = thrust::get<1>(rhs); double const new_weight = lhs_weight + rhs_weight; return {(lhs_mean * lhs_weight + rhs_mean * rhs_weight) / new_weight, new_weight, true}; } }; /** * @brief A functor which returns the nearest cumulative weight in the input stream prior to the * specified next weight limit. * * This functor assumes the weight for all scalars is simply 1. Under this assumption, * the nearest weight that will be <= the next limit is simply the nearest integer < the limit, * which we can get by just taking floor(next_limit). For example if our next limit is 3.56, the * nearest whole number <= it is floor(3.56) == 3. 
*/ struct nearest_value_scalar_weights { offset_type const* group_offsets; thrust::pair<double, int> operator() __device__(double next_limit, size_type group_index) { double const f = floor(next_limit); auto const relative_weight_index = max(0, static_cast<int>(next_limit) - 1); auto const group_size = group_offsets[group_index + 1] - group_offsets[group_index]; return {f, relative_weight_index < group_size ? relative_weight_index : group_size - 1}; } }; /** * @brief A functor which returns the nearest cumulative weight in the input stream prior to the * specified next weight limit. * * This functor assumes we are dealing with grouped, sorted, weighted centroids. */ struct nearest_value_centroid_weights { double const* cumulative_weights; offset_type const* outer_offsets; // groups offset_type const* inner_offsets; // tdigests within a group thrust::pair<double, int> operator() __device__(double next_limit, size_type group_index) { auto const tdigest_begin = outer_offsets[group_index]; auto const tdigest_end = outer_offsets[group_index + 1]; auto const num_weights = inner_offsets[tdigest_end] - inner_offsets[tdigest_begin]; // NOTE: as it is today, this functor will never be called for any digests that are empty, but // I'll leave this check here for safety. if (num_weights == 0) { return thrust::pair<double, int>{0, 0}; } double const* group_cumulative_weights = cumulative_weights + inner_offsets[tdigest_begin]; auto const index = ((thrust::lower_bound(thrust::seq, group_cumulative_weights, group_cumulative_weights + num_weights, next_limit)) - group_cumulative_weights); return index == 0 ? thrust::pair<double, int>{0, 0} : thrust::pair<double, int>{group_cumulative_weights[index - 1], static_cast<int>(index) - 1}; } }; /** * @brief A functor which returns the cumulative input weight for a given index in a * set of grouped input values. * * This functor assumes the weight for all scalars is simply 1. Under this assumption, * the cumulative weight for a given value index I is simply I+1. */ struct cumulative_scalar_weight { cudf::device_span<size_type const> group_offsets; cudf::device_span<size_type const> group_labels; std::tuple<size_type, size_type, double> operator() __device__(size_type value_index) const { auto const group_index = group_labels[value_index]; auto const relative_value_index = value_index - group_offsets[group_index]; return {group_index, relative_value_index, relative_value_index + 1}; } }; /** * @brief A functor which returns the cumulative input weight for a given index in a * set of grouped input centroids. * * This functor assumes we are dealing with grouped, weighted centroids. 
*/ struct cumulative_centroid_weight { double const* cumulative_weights; cudf::device_span<size_type const> group_labels; offset_type const* outer_offsets; // groups cudf::device_span<offset_type const> inner_offsets; // tdigests with a group std::tuple<size_type, size_type, double> operator() __device__(size_type value_index) const { auto const tdigest_index = static_cast<size_type>( thrust::upper_bound(thrust::seq, inner_offsets.begin(), inner_offsets.end(), value_index) - inner_offsets.begin()) - 1; auto const group_index = group_labels[tdigest_index]; auto const first_tdigest_index = outer_offsets[group_index]; auto const first_weight_index = inner_offsets[first_tdigest_index]; auto const relative_value_index = value_index - first_weight_index; double const* group_cumulative_weights = cumulative_weights + first_weight_index; return {group_index, relative_value_index, group_cumulative_weights[relative_value_index]}; } }; // retrieve group info of scalar inputs by group index struct scalar_group_info { size_type const* group_valid_counts; offset_type const* group_offsets; __device__ thrust::tuple<double, size_type, size_type> operator()(size_type group_index) { return {static_cast<double>(group_valid_counts[group_index]), group_offsets[group_index + 1] - group_offsets[group_index], group_offsets[group_index]}; } }; // retrieve group info of centroid inputs by group index struct centroid_group_info { double const* cumulative_weights; offset_type const* outer_offsets; offset_type const* inner_offsets; __device__ thrust::tuple<double, size_type, size_type> operator()(size_type group_index) { // if there's no weights in this group of digests at all, return 0. auto const group_start = inner_offsets[outer_offsets[group_index]]; auto const group_end = inner_offsets[outer_offsets[group_index + 1]]; auto const num_weights = group_end - group_start; auto const last_weight_index = group_end - 1; return num_weights == 0 ? thrust::tuple<double, size_type, size_type>{0, num_weights, group_start} : thrust::tuple<double, size_type, size_type>{ cumulative_weights[last_weight_index], num_weights, group_start}; } }; struct tdigest_min { __device__ double operator()(thrust::tuple<double, size_type> const& t) { auto const min = thrust::get<0>(t); auto const size = thrust::get<1>(t); return size > 0 ? min : std::numeric_limits<double>::max(); } }; struct tdigest_max { __device__ double operator()(thrust::tuple<double, size_type> const& t) { auto const max = thrust::get<0>(t); auto const size = thrust::get<1>(t); return size > 0 ? max : std::numeric_limits<double>::lowest(); } }; // a monotonically increasing scale function which produces a distribution // of centroids that is more densely packed in the middle of the input // than at the ends. __device__ double scale_func_k1(double quantile, double delta_norm) { double k = delta_norm * asin(2.0 * quantile - 1.0); k += 1.0; double q = (sin(k / delta_norm) + 1.0) / 2.0; return q; } /** * @brief Compute a set of cluster limits (brackets, essentially) for a * given tdigest based on the specified delta and the total weight of values * to be added. * * The number of clusters generated will always be <= delta_, where delta_ is * a reasonably small number likely << 10000. * * Each input group gets an independent set of clusters generated. 1 thread * per group. * * This kernel is called in a two-pass style. Once to compute the per-group * cluster sizes and total # of clusters, and once to compute the actual * weight limits per cluster. 
* * @param delta tdigest compression level * @param num_groups The number of input groups * @param nearest_weight A functor which returns the nearest weight in the input * stream that falls before our current cluster limit * @param group_info A functor which returns the info for the specified group (total * weight, size and start offset) * @param group_cluster_wl Output. The set of cluster weight limits for each group. * @param group_num_clusters Output. The number of output clusters for each input group. * @param group_cluster_offsets Offsets per-group to the start of it's clusters * @param has_nulls Whether or not the input contains nulls * */ template <typename GroupInfo, typename NearestWeightFunc, typename CumulativeWeight> __global__ void generate_cluster_limits_kernel(int delta, size_type num_groups, NearestWeightFunc nearest_weight, GroupInfo group_info, CumulativeWeight cumulative_weight, double* group_cluster_wl, size_type* group_num_clusters, offset_type const* group_cluster_offsets, bool has_nulls) { int const tid = threadIdx.x + blockIdx.x * blockDim.x; auto const group_index = tid; if (group_index >= num_groups) { return; } // we will generate at most delta clusters. double const delta_norm = static_cast<double>(delta) / (2.0 * M_PI); double total_weight; size_type group_size, group_start; thrust::tie(total_weight, group_size, group_start) = group_info(group_index); // start at the correct place based on our cluster offset. double* cluster_wl = group_cluster_wl ? group_cluster_wl + group_cluster_offsets[group_index] : nullptr; // a group with nothing in it. group_num_clusters[group_index] = 0; if (total_weight <= 0) { // if the input contains nulls we can potentially have a group that generates no // clusters because -all- of the input values are null. in that case, the reduce_by_key call // in the tdigest generation step will need a location to store the unused reduction value for // that group of nulls. these "stubs" will be postprocessed out afterwards. if (has_nulls) { group_num_clusters[group_index] = 1; } return; } double cur_limit = 0.0; double cur_weight = 0.0; double next_limit = -1.0; int last_inserted_index = -1; // group-relative index into the input stream // compute the first cluster limit double nearest_w; int nearest_w_index; // group-relative index into the input stream while (true) { cur_weight = next_limit < 0 ? 0 : max(cur_weight + 1, nearest_w); if (cur_weight >= total_weight) { break; } // based on where we are closing the cluster off (not including the incoming weight), // compute the next cluster limit double const quantile = cur_weight / total_weight; next_limit = total_weight * scale_func_k1(quantile, delta_norm); // if the next limit is < the cur limit, we're past the end of the distribution, so we're done. if (next_limit <= cur_limit) { if (cluster_wl) { cluster_wl[group_num_clusters[group_index]] = total_weight; } group_num_clusters[group_index]++; break; } // compute the weight we will be at in the input values just before closing off the current // cluster (because adding the next value will cross the current limit). // NOTE: can't use structured bindings here. thrust::tie(nearest_w, nearest_w_index) = nearest_weight(next_limit, group_index); if (cluster_wl) { // because of the way the scale functions work, it is possible to generate clusters // in such a way that we end up with "gaps" where there are no input values that // fall into a given cluster. 
An example would be this: // // cluster weight limits = 0.00003, 1.008, 3.008 // // input values(weight) = A(1), B(2), C(3) // // naively inserting these values into the clusters simply by taking a lower_bound, // we would get the following distribution of input values into those 3 clusters. // (), (A), (B,C) // // whereas what we really want is: // // (A), (B), (C) // // to fix this, we will artificially adjust the output cluster limits to guarantee // at least 1 input value will be put in each cluster during the reduction step. // this does not affect final centroid results as we still use the "real" weight limits // to compute subsequent clusters - the purpose is only to allow cluster selection // during the reduction step to be trivial. // double adjusted_next_limit = next_limit; if ((last_inserted_index < 0) || // if we haven't inserted anything yet (nearest_w_index == last_inserted_index)) { // if we land in the same bucket as the previous cap // force the value into this bucket nearest_w_index = (last_inserted_index == group_size - 1) ? last_inserted_index : last_inserted_index + 1; // the "adjusted" weight must be high enough so that this value will fall in the bucket. // NOTE: cumulative_weight expects an absolute index into the input value stream, not a // group-relative index [[maybe_unused]] auto [r, i, adjusted] = cumulative_weight(nearest_w_index + group_start); adjusted_next_limit = max(next_limit, adjusted); } cluster_wl[group_num_clusters[group_index]] = adjusted_next_limit; last_inserted_index = nearest_w_index; } group_num_clusters[group_index]++; cur_limit = next_limit; } } /** * @brief Compute a set of cluster limits (brackets, essentially) for a * given tdigest based on the specified delta and the total weight of values * to be added. * * The number of clusters generated will always be <= delta_, where delta_ is * a reasonably small number likely << 10000. * * Each input group gets an independent set of clusters generated. * * @param delta_ tdigest compression level * @param num_groups The number of input groups * @param nearest_weight A functor which returns the nearest weight in the input * stream that falls before our current cluster limit * @param group_info A functor which returns the info for the specified group (total weight, * size and start offset) * @param has_nulls Whether or not the input data contains nulls * @param stream CUDA stream used for device memory operations and kernel launches. 
* @param mr Device memory resource used to allocate the returned column's device memory * * @returns A tuple containing the set of cluster weight limits for each group, a set of * list-style offsets indicating group sizes, and the total number of clusters */ template <typename GroupInfo, typename NearestWeight, typename CumulativeWeight> std::tuple<rmm::device_uvector<double>, std::unique_ptr<column>, size_type> generate_group_cluster_info(int delta, size_type num_groups, NearestWeight nearest_weight, GroupInfo group_info, CumulativeWeight cumulative_weight, bool has_nulls, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { constexpr size_type block_size = 256; cudf::detail::grid_1d const grid(num_groups, block_size); // compute number of clusters per group // each thread computes 1 set of clusters (# of cluster sets == # of groups) rmm::device_uvector<size_type> group_num_clusters(num_groups, stream); hipLaunchKernelGGL(( generate_cluster_limits_kernel), dim3(grid.num_blocks), dim3(block_size), 0, stream.value(), delta, num_groups, nearest_weight, group_info, cumulative_weight, nullptr, group_num_clusters.begin(), nullptr, has_nulls); // generate group cluster offsets (where the clusters for a given group start and end) auto group_cluster_offsets = cudf::make_numeric_column( data_type{type_id::INT32}, num_groups + 1, mask_state::UNALLOCATED, stream, mr); auto cluster_size = cudf::detail::make_counting_transform_iterator( 0, [group_num_clusters = group_num_clusters.begin(), num_groups] __device__(size_type index) { return index == num_groups ? 0 : group_num_clusters[index]; }); thrust::exclusive_scan(rmm::exec_policy(stream), cluster_size, cluster_size + num_groups + 1, group_cluster_offsets->mutable_view().begin<offset_type>(), 0); // total # of clusters offset_type total_clusters = cudf::detail::get_value<offset_type>(group_cluster_offsets->view(), num_groups, stream); // fill in the actual cluster weight limits rmm::device_uvector<double> group_cluster_wl(total_clusters, stream); hipLaunchKernelGGL(( generate_cluster_limits_kernel), dim3(grid.num_blocks), dim3(block_size), 0, stream.value(), delta, num_groups, nearest_weight, group_info, cumulative_weight, group_cluster_wl.begin(), group_num_clusters.begin(), group_cluster_offsets->view().begin<offset_type>(), has_nulls); return {std::move(group_cluster_wl), std::move(group_cluster_offsets), static_cast<size_type>(total_clusters)}; } std::unique_ptr<column> build_output_column(size_type num_rows, std::unique_ptr<column>&& means, std::unique_ptr<column>&& weights, std::unique_ptr<column>&& offsets, std::unique_ptr<column>&& min_col, std::unique_ptr<column>&& max_col, bool has_nulls, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // whether or not this weight is a stub auto is_stub_weight = [weights = weights->view().begin<double>()] __device__(size_type i) { return weights[i] == 0; }; // whether or not this particular tdigest is a stub auto is_stub_digest = [offsets = offsets->view().begin<offset_type>(), is_stub_weight] __device__( size_type i) { return is_stub_weight(offsets[i]) ? 1 : 0; }; size_type const num_stubs = [&]() { if (!has_nulls) { return 0; } auto iter = cudf::detail::make_counting_transform_iterator(0, is_stub_digest); return thrust::reduce(rmm::exec_policy(stream), iter, iter + num_rows); }(); // if there are no stub tdigests, we can return immediately. 
if (num_stubs == 0) { return cudf::detail::tdigest::make_tdigest_column(num_rows, std::move(means), std::move(weights), std::move(offsets), std::move(min_col), std::move(max_col), stream, mr); } // otherwise we need to strip out the stubs. auto remove_stubs = [&](column_view const& col, size_type num_stubs) { auto result = cudf::make_numeric_column( data_type{type_id::FLOAT64}, col.size() - num_stubs, mask_state::UNALLOCATED, stream, mr); thrust::remove_copy_if(rmm::exec_policy(stream), col.begin<double>(), col.end<double>(), thrust::make_counting_iterator(0), result->mutable_view().begin<double>(), is_stub_weight); return result; }; // remove from the means and weights column auto _means = remove_stubs(*means, num_stubs); auto _weights = remove_stubs(*weights, num_stubs); // adjust offsets. rmm::device_uvector<offset_type> sizes(num_rows, stream); thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + num_rows, sizes.begin(), [offsets = offsets->view().begin<offset_type>()] __device__(size_type i) { return offsets[i + 1] - offsets[i]; }); auto iter = cudf::detail::make_counting_transform_iterator( 0, [sizes = sizes.begin(), is_stub_digest, num_rows] __device__(size_type i) { return i == num_rows || is_stub_digest(i) ? 0 : sizes[i]; }); thrust::exclusive_scan(rmm::exec_policy(stream), iter, iter + num_rows + 1, offsets->mutable_view().begin<offset_type>(), 0); // assemble final column return cudf::detail::tdigest::make_tdigest_column(num_rows, std::move(_means), std::move(_weights), std::move(offsets), std::move(min_col), std::move(max_col), stream, mr); } /** * @brief Compute a column of tdigests. * * Assembles the output tdigest column based on the specified delta, a stream of * input values (either scalar or centroids), and an assortment of per-group * clustering information. * * This function is effectively just a reduce_by_key that performs a reduction * from input values -> centroid clusters as defined by the the cluster weight * boundaries. * * @param delta tdigest compression level * @param values_begin Beginning of the range of input values. * @param values_end End of the range of input values. * @param cumulative_weight Functor which returns cumulative weight and group information for * an absolute input value index. * @param min_col Column containing the minimum value per group. * @param max_col Column containing the maximum value per group. * @param group_cluster_wl Cluster weight limits for each group. * @param group_cluster_offsets R-value reference of offsets into the cluster weight limits. * @param total_clusters Total number of clusters in all groups. * @param has_nulls Whether or not the input contains nulls * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource used to allocate the returned column's device memory * * @returns A tdigest column with 1 row per output tdigest. */ template <typename CentroidIter, typename CumulativeWeight> std::unique_ptr<column> compute_tdigests(int delta, CentroidIter centroids_begin, CentroidIter centroids_end, CumulativeWeight group_cumulative_weight, std::unique_ptr<column>&& min_col, std::unique_ptr<column>&& max_col, rmm::device_uvector<double> const& group_cluster_wl, std::unique_ptr<column>&& group_cluster_offsets, size_type total_clusters, bool has_nulls, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // the output for each group is a column of data that represents the tdigest. 
since we want 1 row // per group, each row will be a list the length of the tdigest for that group. so our output // column is of the form: // struct { // centroids for the digest // list { // struct { // double // mean // double // weight // } // } // double // min // double // max // } // if (total_clusters == 0) { return cudf::detail::tdigest::make_empty_tdigest_column(stream, mr); } // each input group represents an individual tdigest. within each tdigest, we want the keys // to represent cluster indices (for example, if a tdigest had 100 clusters, the keys should fall // into the range 0-99). But since we have multiple tdigests, we need to keep the keys unique // between the groups, so we add our group start offset. auto keys = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [delta, group_cluster_wl = group_cluster_wl.data(), group_cluster_offsets = group_cluster_offsets->view().begin<offset_type>(), group_cumulative_weight] __device__(size_type value_index) -> size_type { // get group index, relative value index within the group and cumulative weight. [[maybe_unused]] auto [group_index, relative_value_index, cumulative_weight] = group_cumulative_weight(value_index); auto const num_clusters = group_cluster_offsets[group_index + 1] - group_cluster_offsets[group_index]; if (num_clusters == 0) { return group_cluster_offsets[group_index]; } // compute start of cluster weight limits for this group double const* weight_limits = group_cluster_wl + group_cluster_offsets[group_index]; // local cluster index size_type const group_cluster_index = min(num_clusters - 1, static_cast<size_type>( thrust::lower_bound( thrust::seq, weight_limits, weight_limits + num_clusters, cumulative_weight) - weight_limits)); // add the cluster offset to generate a globally unique key return group_cluster_index + group_cluster_offsets[group_index]; }); // mean and weight data auto centroid_means = cudf::make_numeric_column( data_type{type_id::FLOAT64}, total_clusters, mask_state::UNALLOCATED, stream, mr); auto centroid_weights = cudf::make_numeric_column( data_type{type_id::FLOAT64}, total_clusters, mask_state::UNALLOCATED, stream, mr); // reduce the centroids down by key. cudf::mutable_column_view mean_col(*centroid_means); cudf::mutable_column_view weight_col(*centroid_weights); // reduce the centroids into the clusters auto output = thrust::make_zip_iterator(thrust::make_tuple( mean_col.begin<double>(), weight_col.begin<double>(), thrust::make_discard_iterator())); auto const num_values = std::distance(centroids_begin, centroids_end); thrust::reduce_by_key(rmm::exec_policy(stream), keys, keys + num_values, // keys centroids_begin, // values thrust::make_discard_iterator(), // key output output, // output thrust::equal_to{}, // key equality check merge_centroids{}); // create final tdigest column return build_output_column(group_cluster_offsets->size() - 1, std::move(centroid_means), std::move(centroid_weights), std::move(group_cluster_offsets), std::move(min_col), std::move(max_col), has_nulls, stream, mr); } // return the min/max value of scalar inputs by group index template <typename T> struct get_scalar_minmax { column_device_view const col; device_span<size_type const> group_offsets; size_type const* group_valid_counts; __device__ thrust::tuple<double, double> operator()(size_type group_index) { auto const valid_count = group_valid_counts[group_index]; return valid_count > 0 ? 
thrust::make_tuple( static_cast<double>(col.element<T>(group_offsets[group_index])), static_cast<double>(col.element<T>(group_offsets[group_index] + valid_count - 1))) : thrust::make_tuple(0.0, 0.0); } }; struct typed_group_tdigest { template < typename T, typename std::enable_if_t<cudf::is_numeric<T>() || cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(column_view const& col, cudf::device_span<size_type const> group_offsets, cudf::device_span<size_type const> group_labels, cudf::device_span<size_type const> group_valid_counts, size_type num_groups, int delta, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // first, generate cluster weight information for each input group auto [group_cluster_wl, group_cluster_offsets, total_clusters] = generate_group_cluster_info( delta, num_groups, nearest_value_scalar_weights{group_offsets.begin()}, scalar_group_info{group_valid_counts.begin(), group_offsets.begin()}, cumulative_scalar_weight{group_offsets, group_labels}, col.null_count() > 0, stream, mr); // device column view. handy because the .element() function // automatically handles fixed-point conversions for us auto d_col = cudf::column_device_view::create(col, stream); // compute min and max columns auto min_col = cudf::make_numeric_column( data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr); auto max_col = cudf::make_numeric_column( data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr); thrust::transform( rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + num_groups, thrust::make_zip_iterator(thrust::make_tuple(min_col->mutable_view().begin<double>(), max_col->mutable_view().begin<double>())), get_scalar_minmax<T>{*d_col, group_offsets, group_valid_counts.begin()}); // for simple input values, the "centroids" all have a weight of 1. auto scalar_to_centroid = cudf::detail::make_counting_transform_iterator(0, make_centroid<T>{*d_col}); // generate the final tdigest return compute_tdigests(delta, scalar_to_centroid, scalar_to_centroid + col.size(), cumulative_scalar_weight{group_offsets, group_labels}, std::move(min_col), std::move(max_col), group_cluster_wl, std::move(group_cluster_offsets), total_clusters, col.null_count() > 0, stream, mr); } template < typename T, typename... Args, typename std::enable_if_t<!cudf::is_numeric<T>() && !cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(Args&&...) 
{ CUDF_FAIL("Non-numeric type in group_tdigest"); } }; } // anonymous namespace std::unique_ptr<column> group_tdigest(column_view const& col, cudf::device_span<size_type const> group_offsets, cudf::device_span<size_type const> group_labels, cudf::device_span<size_type const> group_valid_counts, size_type num_groups, int max_centroids, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (col.size() == 0) { return cudf::detail::tdigest::make_empty_tdigest_column(stream, mr); } auto const delta = max_centroids; return cudf::type_dispatcher(col.type(), typed_group_tdigest{}, col, group_offsets, group_labels, group_valid_counts, num_groups, delta, stream, mr); } std::unique_ptr<column> group_merge_tdigest(column_view const& input, cudf::device_span<size_type const> group_offsets, cudf::device_span<size_type const> group_labels, size_type num_groups, int max_centroids, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { tdigest_column_view tdv(input); if (num_groups == 0 || input.size() == 0) { return cudf::detail::tdigest::make_empty_tdigest_column(stream, mr); } // first step is to merge all the tdigests in each group. at the moment the only way to // make this work is to retrieve the group sizes (via group_offsets) and the individual digest // sizes (via input.offsets()) to the gpu and do the merges. The scale problem is that while the // size of each group will likely be small (size of each group will typically map to # of batches // the input data was chopped into for tdigest generation), the -number- of groups can be // arbitrarily large. // // thrust::merge and thrust::merge_by_key don't provide what we need. What we would need is an // algorithm like a super-merge that takes two layers of keys: one which identifies the outer // grouping of tdigests, and one which identifies the inner groupings of the tdigests within the // outer groups. // bring group offsets back to the host std::vector<size_type> h_outer_offsets(group_offsets.size()); hipMemcpyAsync(h_outer_offsets.data(), group_offsets.data(), sizeof(size_type) * group_offsets.size(), hipMemcpyDeviceToHost, stream); // bring tdigest offsets back to the host auto tdigest_offsets = tdv.centroids().offsets(); std::vector<size_type> h_inner_offsets(tdigest_offsets.size()); hipMemcpyAsync(h_inner_offsets.data(), tdigest_offsets.begin<size_type>(), sizeof(size_type) * tdigest_offsets.size(), hipMemcpyDeviceToHost, stream); stream.synchronize(); // extract all means and weights into a table cudf::table_view tdigests_unsliced({tdv.means(), tdv.weights()}); // generate the merged (but not yet compressed) tdigests for each group. 
std::vector<std::unique_ptr<table>> tdigests; tdigests.reserve(num_groups); std::transform( h_outer_offsets.begin(), h_outer_offsets.end() - 1, std::next(h_outer_offsets.begin()), std::back_inserter(tdigests), [&](auto tdigest_start, auto tdigest_end) { // the range of tdigests in this group auto const num_tdigests = tdigest_end - tdigest_start; // slice each tdigest from the input std::vector<table_view> unmerged_tdigests; unmerged_tdigests.reserve(num_tdigests); auto offset_iter = std::next(h_inner_offsets.begin(), tdigest_start); std::transform(offset_iter, offset_iter + num_tdigests, std::next(offset_iter), std::back_inserter(unmerged_tdigests), [&](auto start, auto end) { return cudf::detail::slice(tdigests_unsliced, {start, end}, stream); }); // merge return cudf::detail::merge(unmerged_tdigests, {0}, {order::ASCENDING}, {}, stream, mr); }); // generate min and max values auto merged_min_col = cudf::make_numeric_column( data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr); auto min_iter = thrust::make_transform_iterator( thrust::make_zip_iterator(thrust::make_tuple(tdv.min_begin(), tdv.size_begin())), tdigest_min{}); thrust::reduce_by_key(rmm::exec_policy(stream), group_labels.begin(), group_labels.end(), min_iter, thrust::make_discard_iterator(), merged_min_col->mutable_view().begin<double>(), thrust::equal_to{}, // key equality check thrust::minimum{}); auto merged_max_col = cudf::make_numeric_column( data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr); auto max_iter = thrust::make_transform_iterator( thrust::make_zip_iterator(thrust::make_tuple(tdv.max_begin(), tdv.size_begin())), tdigest_max{}); thrust::reduce_by_key(rmm::exec_policy(stream), group_labels.begin(), group_labels.end(), max_iter, thrust::make_discard_iterator(), merged_max_col->mutable_view().begin<double>(), thrust::equal_to{}, // key equality check thrust::maximum{}); // for any empty groups, set the min and max to be 0. not technically necessary but it makes // testing simpler. auto group_num_weights = cudf::detail::make_counting_transform_iterator( 0, [outer_offsets = group_offsets.data(), inner_offsets = tdigest_offsets.begin<size_type>()] __device__(size_type group_index) -> size_type { auto const tdigest_begin = outer_offsets[group_index]; auto const tdigest_end = outer_offsets[group_index + 1]; return inner_offsets[tdigest_end] - inner_offsets[tdigest_begin]; }); auto group_is_empty = [] __device__(size_type group_size) { return group_size == 0; }; thrust::replace_if(rmm::exec_policy(stream), merged_min_col->mutable_view().begin<double>(), merged_min_col->mutable_view().end<double>(), group_num_weights, group_is_empty, 0); thrust::replace_if(rmm::exec_policy(stream), merged_max_col->mutable_view().begin<double>(), merged_max_col->mutable_view().end<double>(), group_num_weights, group_is_empty, 0); // concatenate all the merged tdigests back into one table. 
std::vector<table_view> tdigest_views; tdigest_views.reserve(num_groups); std::transform(tdigests.begin(), tdigests.end(), std::back_inserter(tdigest_views), [](std::unique_ptr<table> const& t) { return t->view(); }); auto merged = cudf::detail::concatenate(tdigest_views, stream, mr); // generate cumulative weights auto merged_weights = merged->get_column(1).view(); auto cumulative_weights = cudf::make_numeric_column( data_type{type_id::FLOAT64}, merged_weights.size(), mask_state::UNALLOCATED); auto keys = cudf::detail::make_counting_transform_iterator( 0, [group_labels = group_labels.begin(), inner_offsets = tdigest_offsets.begin<size_type>(), num_inner_offsets = tdigest_offsets.size()] __device__(int index) { // what -original- tdigest index this absolute index corresponds to auto const iter = thrust::prev( thrust::upper_bound(thrust::seq, inner_offsets, inner_offsets + num_inner_offsets, index)); auto const tdigest_index = thrust::distance(inner_offsets, iter); // what group index the original tdigest belongs to return group_labels[tdigest_index]; }); thrust::inclusive_scan_by_key(rmm::exec_policy(stream), keys, keys + cumulative_weights->size(), merged_weights.begin<double>(), cumulative_weights->mutable_view().begin<double>()); auto const delta = max_centroids; // generate cluster info auto [group_cluster_wl, group_cluster_offsets, total_clusters] = generate_group_cluster_info( delta, num_groups, nearest_value_centroid_weights{cumulative_weights->view().begin<double>(), group_offsets.data(), tdigest_offsets.begin<size_type>()}, centroid_group_info{cumulative_weights->view().begin<double>(), group_offsets.data(), tdigest_offsets.begin<size_type>()}, cumulative_centroid_weight{ cumulative_weights->view().begin<double>(), group_labels, group_offsets.data(), {tdigest_offsets.begin<offset_type>(), static_cast<size_t>(tdigest_offsets.size())}}, false, stream, mr); // input centroid values auto centroids = cudf::detail::make_counting_transform_iterator( 0, make_weighted_centroid{merged->get_column(0).view().begin<double>(), merged_weights.begin<double>()}); // compute the tdigest return compute_tdigests(delta, centroids, centroids + merged->num_rows(), cumulative_centroid_weight{cumulative_weights->view().begin<double>(), group_labels, group_offsets.data(), {tdigest_offsets.begin<offset_type>(), static_cast<size_t>(tdigest_offsets.size())}}, std::move(merged_min_col), std::move(merged_max_col), group_cluster_wl, std::move(group_cluster_offsets), total_clusters, false, stream, mr); } } // namespace detail } // namespace groupby } // namespace cudf
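// Host-side sketch (not cudf code; the names and the floor() simplification
// are illustrative) of what generate_cluster_limits_kernel above does for one
// group in the scalar case, where every input value has weight 1 and the
// nearest cumulative weight below a limit is therefore just floor(limit).
// The "adjusted limit" gap-fix performed by the kernel is omitted here.
#include <algorithm>
#include <cmath>
#include <vector>

static double k1_scale(double quantile, double delta_norm) {
  double k = delta_norm * std::asin(2.0 * quantile - 1.0) + 1.0;
  return (std::sin(k / delta_norm) + 1.0) / 2.0;
}

static std::vector<double> cluster_limits(int delta, double total_weight) {
  double const delta_norm = delta / (2.0 * M_PI);
  std::vector<double> limits;  // one weight limit per generated cluster
  double cur_weight = 0.0, cur_limit = 0.0, next_limit = -1.0;
  while (true) {
    // advance to the weight just before the next value would cross the limit
    cur_weight = next_limit < 0 ? 0.0
                                : std::max(cur_weight + 1.0, std::floor(next_limit));
    if (cur_weight >= total_weight) break;
    next_limit = total_weight * k1_scale(cur_weight / total_weight, delta_norm);
    if (next_limit <= cur_limit) {  // past the end of the distribution
      limits.push_back(total_weight);
      break;
    }
    limits.push_back(next_limit);
    cur_limit = next_limit;
  }
  return limits;  // limits.size() == number of clusters for the group (<= delta)
}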
686c6325320cdea5878a22f5b04bb887c7797a9f.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/concatenate.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/merge.cuh> #include <cudf/detail/sorting.hpp> #include <cudf/detail/tdigest/tdigest.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/tdigest/tdigest_column_view.cuh> #include <cudf/utilities/span.hpp> #include <cudf/lists/lists_column_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/iterator/discard_iterator.h> namespace cudf { namespace groupby { namespace detail { using namespace cudf::tdigest; namespace { // the most representative point within a cluster of similar // values. {mean, weight} // NOTE: Using a tuple here instead of a struct to take advantage of // thrust zip iterators for output. using centroid = thrust::tuple<double, double, bool>; // make a centroid from a scalar with a weight of 1. template <typename T> struct make_centroid { column_device_view const col; centroid operator() __device__(size_type index) { auto const is_valid = col.is_valid(index); auto const mean = is_valid ? static_cast<double>(col.element<T>(index)) : 0.0; auto const weight = is_valid ? 1.0 : 0.0; return {mean, weight, is_valid}; } }; // make a centroid from an input stream of mean/weight values. struct make_weighted_centroid { double const* mean; double const* weight; centroid operator() __device__(size_type index) { return {mean[index], weight[index], true}; } }; // merge two centroids struct merge_centroids { centroid operator() __device__(centroid const& lhs, centroid const& rhs) { bool const lhs_valid = thrust::get<2>(lhs); bool const rhs_valid = thrust::get<2>(rhs); if (!lhs_valid && !rhs_valid) { return {0, 0, false}; } if (!lhs_valid) { return rhs; } if (!rhs_valid) { return lhs; } double const lhs_mean = thrust::get<0>(lhs); double const rhs_mean = thrust::get<0>(rhs); double const lhs_weight = thrust::get<1>(lhs); double const rhs_weight = thrust::get<1>(rhs); double const new_weight = lhs_weight + rhs_weight; return {(lhs_mean * lhs_weight + rhs_mean * rhs_weight) / new_weight, new_weight, true}; } }; /** * @brief A functor which returns the nearest cumulative weight in the input stream prior to the * specified next weight limit. * * This functor assumes the weight for all scalars is simply 1. Under this assumption, * the nearest weight that will be <= the next limit is simply the nearest integer < the limit, * which we can get by just taking floor(next_limit). For example if our next limit is 3.56, the * nearest whole number <= it is floor(3.56) == 3. 
*/ struct nearest_value_scalar_weights { offset_type const* group_offsets; thrust::pair<double, int> operator() __device__(double next_limit, size_type group_index) { double const f = floor(next_limit); auto const relative_weight_index = max(0, static_cast<int>(next_limit) - 1); auto const group_size = group_offsets[group_index + 1] - group_offsets[group_index]; return {f, relative_weight_index < group_size ? relative_weight_index : group_size - 1}; } }; /** * @brief A functor which returns the nearest cumulative weight in the input stream prior to the * specified next weight limit. * * This functor assumes we are dealing with grouped, sorted, weighted centroids. */ struct nearest_value_centroid_weights { double const* cumulative_weights; offset_type const* outer_offsets; // groups offset_type const* inner_offsets; // tdigests within a group thrust::pair<double, int> operator() __device__(double next_limit, size_type group_index) { auto const tdigest_begin = outer_offsets[group_index]; auto const tdigest_end = outer_offsets[group_index + 1]; auto const num_weights = inner_offsets[tdigest_end] - inner_offsets[tdigest_begin]; // NOTE: as it is today, this functor will never be called for any digests that are empty, but // I'll leave this check here for safety. if (num_weights == 0) { return thrust::pair<double, int>{0, 0}; } double const* group_cumulative_weights = cumulative_weights + inner_offsets[tdigest_begin]; auto const index = ((thrust::lower_bound(thrust::seq, group_cumulative_weights, group_cumulative_weights + num_weights, next_limit)) - group_cumulative_weights); return index == 0 ? thrust::pair<double, int>{0, 0} : thrust::pair<double, int>{group_cumulative_weights[index - 1], static_cast<int>(index) - 1}; } }; /** * @brief A functor which returns the cumulative input weight for a given index in a * set of grouped input values. * * This functor assumes the weight for all scalars is simply 1. Under this assumption, * the cumulative weight for a given value index I is simply I+1. */ struct cumulative_scalar_weight { cudf::device_span<size_type const> group_offsets; cudf::device_span<size_type const> group_labels; std::tuple<size_type, size_type, double> operator() __device__(size_type value_index) const { auto const group_index = group_labels[value_index]; auto const relative_value_index = value_index - group_offsets[group_index]; return {group_index, relative_value_index, relative_value_index + 1}; } }; /** * @brief A functor which returns the cumulative input weight for a given index in a * set of grouped input centroids. * * This functor assumes we are dealing with grouped, weighted centroids. 
*/ struct cumulative_centroid_weight { double const* cumulative_weights; cudf::device_span<size_type const> group_labels; offset_type const* outer_offsets; // groups cudf::device_span<offset_type const> inner_offsets; // tdigests with a group std::tuple<size_type, size_type, double> operator() __device__(size_type value_index) const { auto const tdigest_index = static_cast<size_type>( thrust::upper_bound(thrust::seq, inner_offsets.begin(), inner_offsets.end(), value_index) - inner_offsets.begin()) - 1; auto const group_index = group_labels[tdigest_index]; auto const first_tdigest_index = outer_offsets[group_index]; auto const first_weight_index = inner_offsets[first_tdigest_index]; auto const relative_value_index = value_index - first_weight_index; double const* group_cumulative_weights = cumulative_weights + first_weight_index; return {group_index, relative_value_index, group_cumulative_weights[relative_value_index]}; } }; // retrieve group info of scalar inputs by group index struct scalar_group_info { size_type const* group_valid_counts; offset_type const* group_offsets; __device__ thrust::tuple<double, size_type, size_type> operator()(size_type group_index) { return {static_cast<double>(group_valid_counts[group_index]), group_offsets[group_index + 1] - group_offsets[group_index], group_offsets[group_index]}; } }; // retrieve group info of centroid inputs by group index struct centroid_group_info { double const* cumulative_weights; offset_type const* outer_offsets; offset_type const* inner_offsets; __device__ thrust::tuple<double, size_type, size_type> operator()(size_type group_index) { // if there's no weights in this group of digests at all, return 0. auto const group_start = inner_offsets[outer_offsets[group_index]]; auto const group_end = inner_offsets[outer_offsets[group_index + 1]]; auto const num_weights = group_end - group_start; auto const last_weight_index = group_end - 1; return num_weights == 0 ? thrust::tuple<double, size_type, size_type>{0, num_weights, group_start} : thrust::tuple<double, size_type, size_type>{ cumulative_weights[last_weight_index], num_weights, group_start}; } }; struct tdigest_min { __device__ double operator()(thrust::tuple<double, size_type> const& t) { auto const min = thrust::get<0>(t); auto const size = thrust::get<1>(t); return size > 0 ? min : std::numeric_limits<double>::max(); } }; struct tdigest_max { __device__ double operator()(thrust::tuple<double, size_type> const& t) { auto const max = thrust::get<0>(t); auto const size = thrust::get<1>(t); return size > 0 ? max : std::numeric_limits<double>::lowest(); } }; // a monotonically increasing scale function which produces a distribution // of centroids that is more densely packed in the middle of the input // than at the ends. __device__ double scale_func_k1(double quantile, double delta_norm) { double k = delta_norm * asin(2.0 * quantile - 1.0); k += 1.0; double q = (sin(k / delta_norm) + 1.0) / 2.0; return q; } /** * @brief Compute a set of cluster limits (brackets, essentially) for a * given tdigest based on the specified delta and the total weight of values * to be added. * * The number of clusters generated will always be <= delta_, where delta_ is * a reasonably small number likely << 10000. * * Each input group gets an independent set of clusters generated. 1 thread * per group. * * This kernel is called in a two-pass style. Once to compute the per-group * cluster sizes and total # of clusters, and once to compute the actual * weight limits per cluster. 
* * @param delta tdigest compression level * @param num_groups The number of input groups * @param nearest_weight A functor which returns the nearest weight in the input * stream that falls before our current cluster limit * @param group_info A functor which returns the info for the specified group (total * weight, size and start offset) * @param group_cluster_wl Output. The set of cluster weight limits for each group. * @param group_num_clusters Output. The number of output clusters for each input group. * @param group_cluster_offsets Offsets per-group to the start of it's clusters * @param has_nulls Whether or not the input contains nulls * */ template <typename GroupInfo, typename NearestWeightFunc, typename CumulativeWeight> __global__ void generate_cluster_limits_kernel(int delta, size_type num_groups, NearestWeightFunc nearest_weight, GroupInfo group_info, CumulativeWeight cumulative_weight, double* group_cluster_wl, size_type* group_num_clusters, offset_type const* group_cluster_offsets, bool has_nulls) { int const tid = threadIdx.x + blockIdx.x * blockDim.x; auto const group_index = tid; if (group_index >= num_groups) { return; } // we will generate at most delta clusters. double const delta_norm = static_cast<double>(delta) / (2.0 * M_PI); double total_weight; size_type group_size, group_start; thrust::tie(total_weight, group_size, group_start) = group_info(group_index); // start at the correct place based on our cluster offset. double* cluster_wl = group_cluster_wl ? group_cluster_wl + group_cluster_offsets[group_index] : nullptr; // a group with nothing in it. group_num_clusters[group_index] = 0; if (total_weight <= 0) { // if the input contains nulls we can potentially have a group that generates no // clusters because -all- of the input values are null. in that case, the reduce_by_key call // in the tdigest generation step will need a location to store the unused reduction value for // that group of nulls. these "stubs" will be postprocessed out afterwards. if (has_nulls) { group_num_clusters[group_index] = 1; } return; } double cur_limit = 0.0; double cur_weight = 0.0; double next_limit = -1.0; int last_inserted_index = -1; // group-relative index into the input stream // compute the first cluster limit double nearest_w; int nearest_w_index; // group-relative index into the input stream while (true) { cur_weight = next_limit < 0 ? 0 : max(cur_weight + 1, nearest_w); if (cur_weight >= total_weight) { break; } // based on where we are closing the cluster off (not including the incoming weight), // compute the next cluster limit double const quantile = cur_weight / total_weight; next_limit = total_weight * scale_func_k1(quantile, delta_norm); // if the next limit is < the cur limit, we're past the end of the distribution, so we're done. if (next_limit <= cur_limit) { if (cluster_wl) { cluster_wl[group_num_clusters[group_index]] = total_weight; } group_num_clusters[group_index]++; break; } // compute the weight we will be at in the input values just before closing off the current // cluster (because adding the next value will cross the current limit). // NOTE: can't use structured bindings here. thrust::tie(nearest_w, nearest_w_index) = nearest_weight(next_limit, group_index); if (cluster_wl) { // because of the way the scale functions work, it is possible to generate clusters // in such a way that we end up with "gaps" where there are no input values that // fall into a given cluster. 
An example would be this: // // cluster weight limits = 0.00003, 1.008, 3.008 // // input values(weight) = A(1), B(2), C(3) // // naively inserting these values into the clusters simply by taking a lower_bound, // we would get the following distribution of input values into those 3 clusters. // (), (A), (B,C) // // whereas what we really want is: // // (A), (B), (C) // // to fix this, we will artificially adjust the output cluster limits to guarantee // at least 1 input value will be put in each cluster during the reduction step. // this does not affect final centroid results as we still use the "real" weight limits // to compute subsequent clusters - the purpose is only to allow cluster selection // during the reduction step to be trivial. // double adjusted_next_limit = next_limit; if ((last_inserted_index < 0) || // if we haven't inserted anything yet (nearest_w_index == last_inserted_index)) { // if we land in the same bucket as the previous cap // force the value into this bucket nearest_w_index = (last_inserted_index == group_size - 1) ? last_inserted_index : last_inserted_index + 1; // the "adjusted" weight must be high enough so that this value will fall in the bucket. // NOTE: cumulative_weight expects an absolute index into the input value stream, not a // group-relative index [[maybe_unused]] auto [r, i, adjusted] = cumulative_weight(nearest_w_index + group_start); adjusted_next_limit = max(next_limit, adjusted); } cluster_wl[group_num_clusters[group_index]] = adjusted_next_limit; last_inserted_index = nearest_w_index; } group_num_clusters[group_index]++; cur_limit = next_limit; } } /** * @brief Compute a set of cluster limits (brackets, essentially) for a * given tdigest based on the specified delta and the total weight of values * to be added. * * The number of clusters generated will always be <= delta_, where delta_ is * a reasonably small number likely << 10000. * * Each input group gets an independent set of clusters generated. * * @param delta_ tdigest compression level * @param num_groups The number of input groups * @param nearest_weight A functor which returns the nearest weight in the input * stream that falls before our current cluster limit * @param group_info A functor which returns the info for the specified group (total weight, * size and start offset) * @param has_nulls Whether or not the input data contains nulls * @param stream CUDA stream used for device memory operations and kernel launches. 
* @param mr Device memory resource used to allocate the returned column's device memory * * @returns A tuple containing the set of cluster weight limits for each group, a set of * list-style offsets indicating group sizes, and the total number of clusters */ template <typename GroupInfo, typename NearestWeight, typename CumulativeWeight> std::tuple<rmm::device_uvector<double>, std::unique_ptr<column>, size_type> generate_group_cluster_info(int delta, size_type num_groups, NearestWeight nearest_weight, GroupInfo group_info, CumulativeWeight cumulative_weight, bool has_nulls, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { constexpr size_type block_size = 256; cudf::detail::grid_1d const grid(num_groups, block_size); // compute number of clusters per group // each thread computes 1 set of clusters (# of cluster sets == # of groups) rmm::device_uvector<size_type> group_num_clusters(num_groups, stream); generate_cluster_limits_kernel<<<grid.num_blocks, block_size, 0, stream.value()>>>( delta, num_groups, nearest_weight, group_info, cumulative_weight, nullptr, group_num_clusters.begin(), nullptr, has_nulls); // generate group cluster offsets (where the clusters for a given group start and end) auto group_cluster_offsets = cudf::make_numeric_column( data_type{type_id::INT32}, num_groups + 1, mask_state::UNALLOCATED, stream, mr); auto cluster_size = cudf::detail::make_counting_transform_iterator( 0, [group_num_clusters = group_num_clusters.begin(), num_groups] __device__(size_type index) { return index == num_groups ? 0 : group_num_clusters[index]; }); thrust::exclusive_scan(rmm::exec_policy(stream), cluster_size, cluster_size + num_groups + 1, group_cluster_offsets->mutable_view().begin<offset_type>(), 0); // total # of clusters offset_type total_clusters = cudf::detail::get_value<offset_type>(group_cluster_offsets->view(), num_groups, stream); // fill in the actual cluster weight limits rmm::device_uvector<double> group_cluster_wl(total_clusters, stream); generate_cluster_limits_kernel<<<grid.num_blocks, block_size, 0, stream.value()>>>( delta, num_groups, nearest_weight, group_info, cumulative_weight, group_cluster_wl.begin(), group_num_clusters.begin(), group_cluster_offsets->view().begin<offset_type>(), has_nulls); return {std::move(group_cluster_wl), std::move(group_cluster_offsets), static_cast<size_type>(total_clusters)}; } std::unique_ptr<column> build_output_column(size_type num_rows, std::unique_ptr<column>&& means, std::unique_ptr<column>&& weights, std::unique_ptr<column>&& offsets, std::unique_ptr<column>&& min_col, std::unique_ptr<column>&& max_col, bool has_nulls, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // whether or not this weight is a stub auto is_stub_weight = [weights = weights->view().begin<double>()] __device__(size_type i) { return weights[i] == 0; }; // whether or not this particular tdigest is a stub auto is_stub_digest = [offsets = offsets->view().begin<offset_type>(), is_stub_weight] __device__( size_type i) { return is_stub_weight(offsets[i]) ? 1 : 0; }; size_type const num_stubs = [&]() { if (!has_nulls) { return 0; } auto iter = cudf::detail::make_counting_transform_iterator(0, is_stub_digest); return thrust::reduce(rmm::exec_policy(stream), iter, iter + num_rows); }(); // if there are no stub tdigests, we can return immediately. 
if (num_stubs == 0) { return cudf::detail::tdigest::make_tdigest_column(num_rows, std::move(means), std::move(weights), std::move(offsets), std::move(min_col), std::move(max_col), stream, mr); } // otherwise we need to strip out the stubs. auto remove_stubs = [&](column_view const& col, size_type num_stubs) { auto result = cudf::make_numeric_column( data_type{type_id::FLOAT64}, col.size() - num_stubs, mask_state::UNALLOCATED, stream, mr); thrust::remove_copy_if(rmm::exec_policy(stream), col.begin<double>(), col.end<double>(), thrust::make_counting_iterator(0), result->mutable_view().begin<double>(), is_stub_weight); return result; }; // remove from the means and weights column auto _means = remove_stubs(*means, num_stubs); auto _weights = remove_stubs(*weights, num_stubs); // adjust offsets. rmm::device_uvector<offset_type> sizes(num_rows, stream); thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + num_rows, sizes.begin(), [offsets = offsets->view().begin<offset_type>()] __device__(size_type i) { return offsets[i + 1] - offsets[i]; }); auto iter = cudf::detail::make_counting_transform_iterator( 0, [sizes = sizes.begin(), is_stub_digest, num_rows] __device__(size_type i) { return i == num_rows || is_stub_digest(i) ? 0 : sizes[i]; }); thrust::exclusive_scan(rmm::exec_policy(stream), iter, iter + num_rows + 1, offsets->mutable_view().begin<offset_type>(), 0); // assemble final column return cudf::detail::tdigest::make_tdigest_column(num_rows, std::move(_means), std::move(_weights), std::move(offsets), std::move(min_col), std::move(max_col), stream, mr); } /** * @brief Compute a column of tdigests. * * Assembles the output tdigest column based on the specified delta, a stream of * input values (either scalar or centroids), and an assortment of per-group * clustering information. * * This function is effectively just a reduce_by_key that performs a reduction * from input values -> centroid clusters as defined by the the cluster weight * boundaries. * * @param delta tdigest compression level * @param values_begin Beginning of the range of input values. * @param values_end End of the range of input values. * @param cumulative_weight Functor which returns cumulative weight and group information for * an absolute input value index. * @param min_col Column containing the minimum value per group. * @param max_col Column containing the maximum value per group. * @param group_cluster_wl Cluster weight limits for each group. * @param group_cluster_offsets R-value reference of offsets into the cluster weight limits. * @param total_clusters Total number of clusters in all groups. * @param has_nulls Whether or not the input contains nulls * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource used to allocate the returned column's device memory * * @returns A tdigest column with 1 row per output tdigest. */ template <typename CentroidIter, typename CumulativeWeight> std::unique_ptr<column> compute_tdigests(int delta, CentroidIter centroids_begin, CentroidIter centroids_end, CumulativeWeight group_cumulative_weight, std::unique_ptr<column>&& min_col, std::unique_ptr<column>&& max_col, rmm::device_uvector<double> const& group_cluster_wl, std::unique_ptr<column>&& group_cluster_offsets, size_type total_clusters, bool has_nulls, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // the output for each group is a column of data that represents the tdigest. 
since we want 1 row // per group, each row will be a list the length of the tdigest for that group. so our output // column is of the form: // struct { // centroids for the digest // list { // struct { // double // mean // double // weight // } // } // double // min // double // max // } // if (total_clusters == 0) { return cudf::detail::tdigest::make_empty_tdigest_column(stream, mr); } // each input group represents an individual tdigest. within each tdigest, we want the keys // to represent cluster indices (for example, if a tdigest had 100 clusters, the keys should fall // into the range 0-99). But since we have multiple tdigests, we need to keep the keys unique // between the groups, so we add our group start offset. auto keys = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [delta, group_cluster_wl = group_cluster_wl.data(), group_cluster_offsets = group_cluster_offsets->view().begin<offset_type>(), group_cumulative_weight] __device__(size_type value_index) -> size_type { // get group index, relative value index within the group and cumulative weight. [[maybe_unused]] auto [group_index, relative_value_index, cumulative_weight] = group_cumulative_weight(value_index); auto const num_clusters = group_cluster_offsets[group_index + 1] - group_cluster_offsets[group_index]; if (num_clusters == 0) { return group_cluster_offsets[group_index]; } // compute start of cluster weight limits for this group double const* weight_limits = group_cluster_wl + group_cluster_offsets[group_index]; // local cluster index size_type const group_cluster_index = min(num_clusters - 1, static_cast<size_type>( thrust::lower_bound( thrust::seq, weight_limits, weight_limits + num_clusters, cumulative_weight) - weight_limits)); // add the cluster offset to generate a globally unique key return group_cluster_index + group_cluster_offsets[group_index]; }); // mean and weight data auto centroid_means = cudf::make_numeric_column( data_type{type_id::FLOAT64}, total_clusters, mask_state::UNALLOCATED, stream, mr); auto centroid_weights = cudf::make_numeric_column( data_type{type_id::FLOAT64}, total_clusters, mask_state::UNALLOCATED, stream, mr); // reduce the centroids down by key. cudf::mutable_column_view mean_col(*centroid_means); cudf::mutable_column_view weight_col(*centroid_weights); // reduce the centroids into the clusters auto output = thrust::make_zip_iterator(thrust::make_tuple( mean_col.begin<double>(), weight_col.begin<double>(), thrust::make_discard_iterator())); auto const num_values = std::distance(centroids_begin, centroids_end); thrust::reduce_by_key(rmm::exec_policy(stream), keys, keys + num_values, // keys centroids_begin, // values thrust::make_discard_iterator(), // key output output, // output thrust::equal_to{}, // key equality check merge_centroids{}); // create final tdigest column return build_output_column(group_cluster_offsets->size() - 1, std::move(centroid_means), std::move(centroid_weights), std::move(group_cluster_offsets), std::move(min_col), std::move(max_col), has_nulls, stream, mr); } // return the min/max value of scalar inputs by group index template <typename T> struct get_scalar_minmax { column_device_view const col; device_span<size_type const> group_offsets; size_type const* group_valid_counts; __device__ thrust::tuple<double, double> operator()(size_type group_index) { auto const valid_count = group_valid_counts[group_index]; return valid_count > 0 ? 
thrust::make_tuple( static_cast<double>(col.element<T>(group_offsets[group_index])), static_cast<double>(col.element<T>(group_offsets[group_index] + valid_count - 1))) : thrust::make_tuple(0.0, 0.0); } }; struct typed_group_tdigest { template < typename T, typename std::enable_if_t<cudf::is_numeric<T>() || cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(column_view const& col, cudf::device_span<size_type const> group_offsets, cudf::device_span<size_type const> group_labels, cudf::device_span<size_type const> group_valid_counts, size_type num_groups, int delta, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // first, generate cluster weight information for each input group auto [group_cluster_wl, group_cluster_offsets, total_clusters] = generate_group_cluster_info( delta, num_groups, nearest_value_scalar_weights{group_offsets.begin()}, scalar_group_info{group_valid_counts.begin(), group_offsets.begin()}, cumulative_scalar_weight{group_offsets, group_labels}, col.null_count() > 0, stream, mr); // device column view. handy because the .element() function // automatically handles fixed-point conversions for us auto d_col = cudf::column_device_view::create(col, stream); // compute min and max columns auto min_col = cudf::make_numeric_column( data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr); auto max_col = cudf::make_numeric_column( data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr); thrust::transform( rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + num_groups, thrust::make_zip_iterator(thrust::make_tuple(min_col->mutable_view().begin<double>(), max_col->mutable_view().begin<double>())), get_scalar_minmax<T>{*d_col, group_offsets, group_valid_counts.begin()}); // for simple input values, the "centroids" all have a weight of 1. auto scalar_to_centroid = cudf::detail::make_counting_transform_iterator(0, make_centroid<T>{*d_col}); // generate the final tdigest return compute_tdigests(delta, scalar_to_centroid, scalar_to_centroid + col.size(), cumulative_scalar_weight{group_offsets, group_labels}, std::move(min_col), std::move(max_col), group_cluster_wl, std::move(group_cluster_offsets), total_clusters, col.null_count() > 0, stream, mr); } template < typename T, typename... Args, typename std::enable_if_t<!cudf::is_numeric<T>() && !cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(Args&&...) 
{ CUDF_FAIL("Non-numeric type in group_tdigest"); } }; } // anonymous namespace std::unique_ptr<column> group_tdigest(column_view const& col, cudf::device_span<size_type const> group_offsets, cudf::device_span<size_type const> group_labels, cudf::device_span<size_type const> group_valid_counts, size_type num_groups, int max_centroids, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (col.size() == 0) { return cudf::detail::tdigest::make_empty_tdigest_column(stream, mr); } auto const delta = max_centroids; return cudf::type_dispatcher(col.type(), typed_group_tdigest{}, col, group_offsets, group_labels, group_valid_counts, num_groups, delta, stream, mr); } std::unique_ptr<column> group_merge_tdigest(column_view const& input, cudf::device_span<size_type const> group_offsets, cudf::device_span<size_type const> group_labels, size_type num_groups, int max_centroids, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { tdigest_column_view tdv(input); if (num_groups == 0 || input.size() == 0) { return cudf::detail::tdigest::make_empty_tdigest_column(stream, mr); } // first step is to merge all the tdigests in each group. at the moment the only way to // make this work is to retrieve the group sizes (via group_offsets) and the individual digest // sizes (via input.offsets()) to the gpu and do the merges. The scale problem is that while the // size of each group will likely be small (size of each group will typically map to # of batches // the input data was chopped into for tdigest generation), the -number- of groups can be // arbitrarily large. // // thrust::merge and thrust::merge_by_key don't provide what we need. What we would need is an // algorithm like a super-merge that takes two layers of keys: one which identifies the outer // grouping of tdigests, and one which identifies the inner groupings of the tdigests within the // outer groups. // bring group offsets back to the host std::vector<size_type> h_outer_offsets(group_offsets.size()); cudaMemcpyAsync(h_outer_offsets.data(), group_offsets.data(), sizeof(size_type) * group_offsets.size(), cudaMemcpyDeviceToHost, stream); // bring tdigest offsets back to the host auto tdigest_offsets = tdv.centroids().offsets(); std::vector<size_type> h_inner_offsets(tdigest_offsets.size()); cudaMemcpyAsync(h_inner_offsets.data(), tdigest_offsets.begin<size_type>(), sizeof(size_type) * tdigest_offsets.size(), cudaMemcpyDeviceToHost, stream); stream.synchronize(); // extract all means and weights into a table cudf::table_view tdigests_unsliced({tdv.means(), tdv.weights()}); // generate the merged (but not yet compressed) tdigests for each group. 
std::vector<std::unique_ptr<table>> tdigests; tdigests.reserve(num_groups); std::transform( h_outer_offsets.begin(), h_outer_offsets.end() - 1, std::next(h_outer_offsets.begin()), std::back_inserter(tdigests), [&](auto tdigest_start, auto tdigest_end) { // the range of tdigests in this group auto const num_tdigests = tdigest_end - tdigest_start; // slice each tdigest from the input std::vector<table_view> unmerged_tdigests; unmerged_tdigests.reserve(num_tdigests); auto offset_iter = std::next(h_inner_offsets.begin(), tdigest_start); std::transform(offset_iter, offset_iter + num_tdigests, std::next(offset_iter), std::back_inserter(unmerged_tdigests), [&](auto start, auto end) { return cudf::detail::slice(tdigests_unsliced, {start, end}, stream); }); // merge return cudf::detail::merge(unmerged_tdigests, {0}, {order::ASCENDING}, {}, stream, mr); }); // generate min and max values auto merged_min_col = cudf::make_numeric_column( data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr); auto min_iter = thrust::make_transform_iterator( thrust::make_zip_iterator(thrust::make_tuple(tdv.min_begin(), tdv.size_begin())), tdigest_min{}); thrust::reduce_by_key(rmm::exec_policy(stream), group_labels.begin(), group_labels.end(), min_iter, thrust::make_discard_iterator(), merged_min_col->mutable_view().begin<double>(), thrust::equal_to{}, // key equality check thrust::minimum{}); auto merged_max_col = cudf::make_numeric_column( data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr); auto max_iter = thrust::make_transform_iterator( thrust::make_zip_iterator(thrust::make_tuple(tdv.max_begin(), tdv.size_begin())), tdigest_max{}); thrust::reduce_by_key(rmm::exec_policy(stream), group_labels.begin(), group_labels.end(), max_iter, thrust::make_discard_iterator(), merged_max_col->mutable_view().begin<double>(), thrust::equal_to{}, // key equality check thrust::maximum{}); // for any empty groups, set the min and max to be 0. not technically necessary but it makes // testing simpler. auto group_num_weights = cudf::detail::make_counting_transform_iterator( 0, [outer_offsets = group_offsets.data(), inner_offsets = tdigest_offsets.begin<size_type>()] __device__(size_type group_index) -> size_type { auto const tdigest_begin = outer_offsets[group_index]; auto const tdigest_end = outer_offsets[group_index + 1]; return inner_offsets[tdigest_end] - inner_offsets[tdigest_begin]; }); auto group_is_empty = [] __device__(size_type group_size) { return group_size == 0; }; thrust::replace_if(rmm::exec_policy(stream), merged_min_col->mutable_view().begin<double>(), merged_min_col->mutable_view().end<double>(), group_num_weights, group_is_empty, 0); thrust::replace_if(rmm::exec_policy(stream), merged_max_col->mutable_view().begin<double>(), merged_max_col->mutable_view().end<double>(), group_num_weights, group_is_empty, 0); // concatenate all the merged tdigests back into one table. 
std::vector<table_view> tdigest_views; tdigest_views.reserve(num_groups); std::transform(tdigests.begin(), tdigests.end(), std::back_inserter(tdigest_views), [](std::unique_ptr<table> const& t) { return t->view(); }); auto merged = cudf::detail::concatenate(tdigest_views, stream, mr); // generate cumulative weights auto merged_weights = merged->get_column(1).view(); auto cumulative_weights = cudf::make_numeric_column( data_type{type_id::FLOAT64}, merged_weights.size(), mask_state::UNALLOCATED); auto keys = cudf::detail::make_counting_transform_iterator( 0, [group_labels = group_labels.begin(), inner_offsets = tdigest_offsets.begin<size_type>(), num_inner_offsets = tdigest_offsets.size()] __device__(int index) { // what -original- tdigest index this absolute index corresponds to auto const iter = thrust::prev( thrust::upper_bound(thrust::seq, inner_offsets, inner_offsets + num_inner_offsets, index)); auto const tdigest_index = thrust::distance(inner_offsets, iter); // what group index the original tdigest belongs to return group_labels[tdigest_index]; }); thrust::inclusive_scan_by_key(rmm::exec_policy(stream), keys, keys + cumulative_weights->size(), merged_weights.begin<double>(), cumulative_weights->mutable_view().begin<double>()); auto const delta = max_centroids; // generate cluster info auto [group_cluster_wl, group_cluster_offsets, total_clusters] = generate_group_cluster_info( delta, num_groups, nearest_value_centroid_weights{cumulative_weights->view().begin<double>(), group_offsets.data(), tdigest_offsets.begin<size_type>()}, centroid_group_info{cumulative_weights->view().begin<double>(), group_offsets.data(), tdigest_offsets.begin<size_type>()}, cumulative_centroid_weight{ cumulative_weights->view().begin<double>(), group_labels, group_offsets.data(), {tdigest_offsets.begin<offset_type>(), static_cast<size_t>(tdigest_offsets.size())}}, false, stream, mr); // input centroid values auto centroids = cudf::detail::make_counting_transform_iterator( 0, make_weighted_centroid{merged->get_column(0).view().begin<double>(), merged_weights.begin<double>()}); // compute the tdigest return compute_tdigests(delta, centroids, centroids + merged->num_rows(), cumulative_centroid_weight{cumulative_weights->view().begin<double>(), group_labels, group_offsets.data(), {tdigest_offsets.begin<offset_type>(), static_cast<size_t>(tdigest_offsets.size())}}, std::move(merged_min_col), std::move(merged_max_col), group_cluster_wl, std::move(group_cluster_offsets), total_clusters, false, stream, mr); } } // namespace detail } // namespace groupby } // namespace cudf
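The cluster-limit loop above is driven by scale_func_k1: each pass converts the current cumulative weight into a quantile, pushes the quantile through the k1 scale function, and closes the current cluster at the resulting weight limit, which packs clusters densely around the median and sparsely at the tails. Below is a small standalone host-side sketch of that mapping, useful for seeing how many clusters a given delta produces; the 1000-unit total weight and delta = 100 are made-up inputs, and the local pi constant simply mirrors the M_PI used in the device code.

// Host-side sketch of the k1 scale-function mapping used by
// generate_cluster_limits_kernel. Standalone illustration only; not part of libcudf.
#include <cmath>
#include <cstdio>

double next_cluster_limit(double cur_weight, double total_weight, int delta)
{
  double const pi         = 3.14159265358979323846;
  double const delta_norm = static_cast<double>(delta) / (2.0 * pi);
  double const quantile   = cur_weight / total_weight;
  // scale_func_k1: q -> k -> q', densest near the median, sparsest at the tails
  double const k = delta_norm * std::asin(2.0 * quantile - 1.0) + 1.0;
  double const q = (std::sin(k / delta_norm) + 1.0) / 2.0;
  return total_weight * q;
}

int main()
{
  double const total_weight = 1000.0;  // e.g. 1000 unit-weight scalars (made up)
  int const delta           = 100;     // tdigest compression level (made up)
  double w     = 0.0;
  int clusters = 0;
  while (w < total_weight) {
    double const limit = next_cluster_limit(w, total_weight, delta);
    if (limit <= w) break;             // past the end of the distribution
    ++clusters;
    w = limit;
  }
  // the number of clusters generated stays bounded by roughly delta
  printf("clusters generated: %d\n", clusters);
  return 0;
}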
a9c59c8d175243cccb6029521f46a6b7dda745ee.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>

__global__ void add(int *a, int *b, int *c) {
    c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}

int main(void) {
    // Number of blocks
    int N = 512;

    // host copies of a, b, c
    int *a, *b, *c;
    // device copies of a, b, c
    int *d_a, *d_b, *d_c;

    int size = N * sizeof(int);

    // Allocate memory to host copies
    a = (int *) malloc(size);
    b = (int *) malloc(size);
    c = (int *) malloc(size);

    // Initialize vectors
    for (int i = 0; i < N; ++i) {
        a[i] = 2;
        b[i] = 7;
    }

    // Allocate memory to device copies
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    hipMalloc((void **)&d_c, size);

    // Copy inputs from host to device
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);

    // Launch add() kernel on GPU
    hipLaunchKernelGGL((add), dim3(N), dim3(1), 0, 0, d_a, d_b, d_c);

    // Copy result from device to host
    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);

    // Print
    for (int i = 0; i < N; ++i) {
        std::cout << c[i] << " ";
    }
    std::cout << std::endl;

    // Cleanup
    free(a);
    free(b);
    free(c);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    return 0;
}
a9c59c8d175243cccb6029521f46a6b7dda745ee.cu
#include <iostream>
#include <cstdlib>

__global__ void add(int *a, int *b, int *c) {
    c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}

int main(void) {
    // Number of blocks
    int N = 512;

    // host copies of a, b, c
    int *a, *b, *c;
    // device copies of a, b, c
    int *d_a, *d_b, *d_c;

    int size = N * sizeof(int);

    // Allocate memory to host copies
    a = (int *) malloc(size);
    b = (int *) malloc(size);
    c = (int *) malloc(size);

    // Initialize vectors
    for (int i = 0; i < N; ++i) {
        a[i] = 2;
        b[i] = 7;
    }

    // Allocate memory to device copies
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Copy inputs from host to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Launch add() kernel on GPU
    add<<<N,1>>>(d_a, d_b, d_c);

    // Copy result from device to host
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    // Print
    for (int i = 0; i < N; ++i) {
        std::cout << c[i] << " ";
    }
    std::cout << std::endl;

    // Cleanup
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    return 0;
}
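Taken together, the .hip/.cu pair above shows the one change hipify makes to this example: the triple-chevron launch add<<<N,1>>>(d_a, d_b, d_c) becomes hipLaunchKernelGGL((add), dim3(N), dim3(1), 0, 0, d_a, d_b, d_c), with the shared-memory size and stream arguments spelled out as 0. The sketch below shows the same addition written the way larger vectors are usually handled, with many threads per block and a bounds check; the 256-thread block size and the error check are illustrative choices, not part of the original files.

// Sketch: element-wise add with a conventional launch shape (illustrative only).
__global__ void add_n(const int *a, const int *b, int *c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

// Host-side usage, assuming d_a, d_b, d_c and N as allocated above:
//   int threadsPerBlock = 256;
//   int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
//   add_n<<<blocks, threadsPerBlock>>>(d_a, d_b, d_c, N);
//   if (cudaGetLastError() != cudaSuccess) { /* handle launch error */ }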
16048d24797f4e754efaa02c0b484294c144e691.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/zgemm_reduce.cu, normal z -> c, Mon Jun 25 18:24:11 2018 */ #include "magma_internal.h" #include "magma_templates.h" // size of work for a thread block #define BLK_M 16 #define BLK_N 16 // BLK_K gets defined in magmablas_cgemm_reduce, // because it depends on the CUDA architecture at runtime. /******************************************************************************/ // BLK_K size is templated, as it depends on CUDA architecture at runtime. // Hmm... how to compile for both CUDA arch 1.x and 2.x? template< int BLK_K > __global__ void cgemm_reduce_kernel( int m, int n, int k, magmaFloatComplex alpha, const magmaFloatComplex* __restrict__ dA, int lda, const magmaFloatComplex* __restrict__ dB, int ldb, magmaFloatComplex beta, magmaFloatComplex * __restrict__ dC, int ldc) { #if (__CUDA_ARCH__ >= 200) const int tx = threadIdx.x; if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) { dA += (blockIdx.x*BLK_M + threadIdx.y) * lda; dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb; dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc; // was: sum[BLK_M][BLK_N+1][BLK_K+1]; // moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer. __shared__ magmaFloatComplex sum[BLK_K][BLK_M+1][BLK_N+1]; magmaFloatComplex lsum; /* w := v**H * C */ lsum = MAGMA_C_ZERO; for( int j = tx; j < k; j += BLK_K ) lsum += MAGMA_C_CONJ( dA[j] )* dB[j]; sum[tx][threadIdx.y][threadIdx.z] = lsum; magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum ); /* C := C - v * w */ __syncthreads(); if (threadIdx.x == 0) { if (MAGMA_C_EQUAL(beta, MAGMA_C_ZERO)) dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z]; else dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] + alpha*sum[0][threadIdx.y][threadIdx.z]; } } #endif } /***************************************************************************//** Purpose ------- CGEMM_REDUCE performs one of the matrix-matrix operations C := alpha*A^T*B + beta*C, where alpha and beta are scalars, and A, B and C are matrices, with A a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix. This routine is tuned for m, n << k. Typically, m and n are expected to be less than 128. 
@ingroup magma_gemm *******************************************************************************/ extern "C" void magmablas_cgemm_reduce( magma_int_t m, magma_int_t n, magma_int_t k, magmaFloatComplex alpha, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_const_ptr dB, magma_int_t lddb, magmaFloatComplex beta, magmaFloatComplex_ptr dC, magma_int_t lddc, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( k < 0 ) info = -3; else if ( ldda < m ) info = -6; else if ( lddb < k ) info = -8; else if ( lddc < m ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // call CUDA ARCH 1.x -- maximum 512 threads const int NUM_THREADS = 512; const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2 dim3 threads( BLK_K, BLK_M, BLK_N ); dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 ); hipLaunchKernelGGL(( cgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc ); } else { // -------------------- // call CUDA ARCH 2.x -- maximum 1024 threads const int NUM_THREADS = 1024; const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4 dim3 threads( BLK_K, BLK_M, BLK_N ); dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 ); hipLaunchKernelGGL(( cgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc ); } }
16048d24797f4e754efaa02c0b484294c144e691.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/zgemm_reduce.cu, normal z -> c, Mon Jun 25 18:24:11 2018 */ #include "magma_internal.h" #include "magma_templates.h" // size of work for a thread block #define BLK_M 16 #define BLK_N 16 // BLK_K gets defined in magmablas_cgemm_reduce, // because it depends on the CUDA architecture at runtime. /******************************************************************************/ // BLK_K size is templated, as it depends on CUDA architecture at runtime. // Hmm... how to compile for both CUDA arch 1.x and 2.x? template< int BLK_K > __global__ void cgemm_reduce_kernel( int m, int n, int k, magmaFloatComplex alpha, const magmaFloatComplex* __restrict__ dA, int lda, const magmaFloatComplex* __restrict__ dB, int ldb, magmaFloatComplex beta, magmaFloatComplex * __restrict__ dC, int ldc) { #if (__CUDA_ARCH__ >= 200) const int tx = threadIdx.x; if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) { dA += (blockIdx.x*BLK_M + threadIdx.y) * lda; dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb; dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc; // was: sum[BLK_M][BLK_N+1][BLK_K+1]; // moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer. __shared__ magmaFloatComplex sum[BLK_K][BLK_M+1][BLK_N+1]; magmaFloatComplex lsum; /* w := v**H * C */ lsum = MAGMA_C_ZERO; for( int j = tx; j < k; j += BLK_K ) lsum += MAGMA_C_CONJ( dA[j] )* dB[j]; sum[tx][threadIdx.y][threadIdx.z] = lsum; magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum ); /* C := C - v * w */ __syncthreads(); if (threadIdx.x == 0) { if (MAGMA_C_EQUAL(beta, MAGMA_C_ZERO)) dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z]; else dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] + alpha*sum[0][threadIdx.y][threadIdx.z]; } } #endif } /***************************************************************************//** Purpose ------- CGEMM_REDUCE performs one of the matrix-matrix operations C := alpha*A^T*B + beta*C, where alpha and beta are scalars, and A, B and C are matrices, with A a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix. This routine is tuned for m, n << k. Typically, m and n are expected to be less than 128. 
@ingroup magma_gemm *******************************************************************************/ extern "C" void magmablas_cgemm_reduce( magma_int_t m, magma_int_t n, magma_int_t k, magmaFloatComplex alpha, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_const_ptr dB, magma_int_t lddb, magmaFloatComplex beta, magmaFloatComplex_ptr dC, magma_int_t lddc, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( k < 0 ) info = -3; else if ( ldda < m ) info = -6; else if ( lddb < k ) info = -8; else if ( lddc < m ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // call CUDA ARCH 1.x -- maximum 512 threads const int NUM_THREADS = 512; const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2 dim3 threads( BLK_K, BLK_M, BLK_N ); dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 ); cgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, queue->cuda_stream() >>> ( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc ); } else { // -------------------- // call CUDA ARCH 2.x -- maximum 1024 threads const int NUM_THREADS = 1024; const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4 dim3 threads( BLK_K, BLK_M, BLK_N ); dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 ); cgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, queue->cuda_stream() >>> ( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc ); } }
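Both launch branches above tile the output matrix C in BLK_M x BLK_N = 16 x 16 blocks; the only architecture-dependent choice is BLK_K, the number of k-slices each thread block reduces over, which is simply the thread budget divided by 256. A standalone sketch of that arithmetic follows; ceildiv stands in for what magma_ceildiv is assumed to compute (round-up integer division), and the m and n values are made-up examples of the tall-skinny shape this routine is tuned for.

// Sketch of the launch-shape arithmetic used by magmablas_cgemm_reduce.
#include <cstdio>

static int ceildiv(int x, int y) { return (x + y - 1) / y; }  // assumed magma_ceildiv behaviour

int main() {
    const int BLK_M = 16, BLK_N = 16;
    const int m = 100, n = 7;                  // m, n << k is the tuned-for case (made up)
    const int thread_budget[2] = {512, 1024};  // CUDA arch 1.x vs 2.x and later
    for (int i = 0; i < 2; ++i) {
        int num_threads = thread_budget[i];
        int blk_k = num_threads / (BLK_M * BLK_N);  // 2 for 512 threads, 4 for 1024
        printf("threads=%d -> block(%d,%d,%d), grid(%d,%d,1)\n",
               num_threads, blk_k, BLK_M, BLK_N,
               ceildiv(m, BLK_M), ceildiv(n, BLK_N));
    }
    return 0;
}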
28f4f478bd4b6311e23d23918b43225396e1121f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

//////////////
//   mask   //
//////////////

__global__ void knl_mask(MaskEventsIn *evsI, bool *mask)
{
    int iev = blockDim.x*blockIdx.x + threadIdx.x;

    if (iev < evsI->nev) {
        bool isPass = false;
        if ( (evsI->HLT_Ele32_WPTight_Gsf[iev] || evsI->HLT_IsoMu24[iev]) &&
             (evsI->nElectron[iev]>=2 || evsI->nMuon[iev]>=2) ) {
            isPass = true;
        }
        mask[iev] = isPass;
    }
}
28f4f478bd4b6311e23d23918b43225396e1121f.cu
//////////////
//   mask   //
//////////////

__global__ void knl_mask(MaskEventsIn *evsI, bool *mask)
{
    int iev = blockDim.x*blockIdx.x + threadIdx.x;

    if (iev < evsI->nev) {
        bool isPass = false;
        if ( (evsI->HLT_Ele32_WPTight_Gsf[iev] || evsI->HLT_IsoMu24[iev]) &&
             (evsI->nElectron[iev]>=2 || evsI->nMuon[iev]>=2) ) {
            isPass = true;
        }
        mask[iev] = isPass;
    }
}
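knl_mask is a one-thread-per-event predicate: an event passes when either HLT flag is set and it has at least two electrons or two muons. A commented usage sketch follows; MaskEventsIn is defined elsewhere in this codebase, so the device pointers and the 256-thread block size here are assumptions for illustration only.

// Usage sketch (host side); d_evsI and nev are assumed to be prepared by the
// surrounding framework, and d_mask receives one flag per event.
//   MaskEventsIn *d_evsI = /* device-resident input, filled elsewhere */;
//   int nev = /* number of events, host copy of evsI->nev */;
//   bool *d_mask = nullptr;
//   cudaMalloc(&d_mask, nev * sizeof(bool));
//   const int threads = 256;
//   const int blocks  = (nev + threads - 1) / threads;   // cover every event
//   knl_mask<<<blocks, threads>>>(d_evsI, d_mask);
//   cudaDeviceSynchronize();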
5d0ee1f26887e704e23f0cb656f996a447fe2de1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //blackScholesAnalyticEngineKernels.cu //Scott Grauer-Gray //Kernels for running black scholes using the analytic engine #ifndef BLACK_SCHOLES_ANALYTIC_ENGINE_KERNELS_CU #define BLACK_SCHOLES_ANALYTIC_ENGINE_KERNELS_CU //declarations for the kernels #include "blackScholesAnalyticEngineKernels.cuh" //needed for the constants in the error function #include "errorFunctConsts.cuh" //device kernel to retrieve the compound factor in interestRate __device__ float interestRateCompoundFactor(float t, yieldTermStruct currYieldTermStruct) { return (exp((currYieldTermStruct.forward)*t)); } //device kernel to retrieve the discount factor in interestRate __device__ float interestRateDiscountFactor(float t, yieldTermStruct currYieldTermStruct) { return 1.0f / interestRateCompoundFactor(t, currYieldTermStruct); } //device function to get the variance of the black volatility function __device__ float getBlackVolBlackVar(blackVolStruct volTS) { float vol = volTS.volatility; return vol*vol*volTS.timeYearFraction; } //device function to get the discount on a dividend yield __device__ float getDiscountOnDividendYield(float yearFraction, yieldTermStruct dividendYieldTermStruct) { float intDiscountFactor = interestRateDiscountFactor(yearFraction, dividendYieldTermStruct); return intDiscountFactor; } //device function to get the discount on the risk free rate __device__ float getDiscountOnRiskFreeRate(float yearFraction, yieldTermStruct riskFreeRateYieldTermStruct) { return interestRateDiscountFactor(yearFraction, riskFreeRateYieldTermStruct); } //device kernel to run the error function __device__ float errorFunct(normalDistStruct normDist, float x) { float R,S,P,Q,s,y,z,r, ax; ax = fabsf(x); if(ax < 0.84375f) { if(ax < 3.7252902984e-09f) { if (ax < FLT_MIN*16.0f) return 0.125f*(8.0f*x+ (ERROR_FUNCT_efx8)*x); /*avoid underflow */ return x + (ERROR_FUNCT_efx)*x; } z = x*x; r = ERROR_FUNCT_pp0+z*(ERROR_FUNCT_pp1+z*(ERROR_FUNCT_pp2+z*(ERROR_FUNCT_pp3+z*ERROR_FUNCT_pp4))); s = ERROR_FUNCT_one+z*(ERROR_FUNCT_qq1+z*(ERROR_FUNCT_qq2+z*(ERROR_FUNCT_qq3+z*(ERROR_FUNCT_qq4+z*ERROR_FUNCT_qq5)))); y = r/s; return x + x*y; } if(ax <1.25f) { s = ax-ERROR_FUNCT_one; P = ERROR_FUNCT_pa0+s*(ERROR_FUNCT_pa1+s*(ERROR_FUNCT_pa2+s*(ERROR_FUNCT_pa3+s*(ERROR_FUNCT_pa4+s*(ERROR_FUNCT_pa5+s*ERROR_FUNCT_pa6))))); Q = ERROR_FUNCT_one+s*(ERROR_FUNCT_qa1+s*(ERROR_FUNCT_qa2+s*(ERROR_FUNCT_qa3+s*(ERROR_FUNCT_qa4+s*(ERROR_FUNCT_qa5+s*ERROR_FUNCT_qa6))))); if(x>=0.0f) return ERROR_FUNCT_erx + P/Q; else return -1.0f*ERROR_FUNCT_erx - P/Q; } if (ax >= 6.0f) { if(x>=0.0f) return ERROR_FUNCT_one-ERROR_FUNCT_tiny; else return ERROR_FUNCT_tiny-ERROR_FUNCT_one; } /* Starts to lose accuracy when ax~5 */ s = ERROR_FUNCT_one/(ax*ax); if(ax < 2.85714285714285f) { /* |x| < 1/0.35 */ R = ERROR_FUNCT_ra0+s*(ERROR_FUNCT_ra1+s*(ERROR_FUNCT_ra2+s*(ERROR_FUNCT_ra3+s*(ERROR_FUNCT_ra4+s*(ERROR_FUNCT_ra5+s*(ERROR_FUNCT_ra6+s*ERROR_FUNCT_ra7)))))); S = ERROR_FUNCT_one+s*(ERROR_FUNCT_sa1+s*(ERROR_FUNCT_sa2+s*(ERROR_FUNCT_sa3+s*(ERROR_FUNCT_sa4+s*(ERROR_FUNCT_sa5+s*(ERROR_FUNCT_sa6+s*(ERROR_FUNCT_sa7+s*ERROR_FUNCT_sa8))))))); } else { /* |x| >= 1/0.35 */ R=ERROR_FUNCT_rb0+s*(ERROR_FUNCT_rb1+s*(ERROR_FUNCT_rb2+s*(ERROR_FUNCT_rb3+s*(ERROR_FUNCT_rb4+s*(ERROR_FUNCT_rb5+s*ERROR_FUNCT_rb6))))); S=ERROR_FUNCT_one+s*(ERROR_FUNCT_sb1+s*(ERROR_FUNCT_sb2+s*(ERROR_FUNCT_sb3+s*(ERROR_FUNCT_sb4+s*(ERROR_FUNCT_sb5+s*(ERROR_FUNCT_sb6+s*ERROR_FUNCT_sb7)))))); } r = expf( -ax*ax-0.5625f +R/S); 
if(x>=0.0f) return ERROR_FUNCT_one-r/ax; else return r/ax-ERROR_FUNCT_one; } //device kernel to run the operator function in cumulative normal distribution __device__ float cumNormDistOp(normalDistStruct normDist, float z) { z = (z - normDist.average) / normDist.sigma; float result = 0.5f * ( 1.0f + errorFunct(normDist, z*M_SQRT_2 ) ); return result; } //device kernel to run the gaussian function in the normal distribution __device__ float gaussianFunctNormDist(normalDistStruct normDist, float x) { float deltax = x - normDist.average; float exponent = -(deltax*deltax)/normDist.denominator; // debian alpha had some strange problem in the very-low range return exponent <= -690.0f ? 0.0f : // exp(x) < 1.0e-300 anyway normDist.normalizationFactor * expf(exponent); } //device kernel to retrieve the derivative in a cumulative normal distribution __device__ float cumNormDistDeriv(normalDistStruct normDist, float x) { float xn = (x - normDist.average) / normDist.sigma; return gaussianFunctNormDist(normDist, xn) / normDist.sigma; } //device function to initialize the cumulative normal distribution structure __device__ void initCumNormDist(normalDistStruct& currCumNormDist) { currCumNormDist.average = 0.0f; currCumNormDist.sigma = 1.0f; currCumNormDist.normalizationFactor = M_SQRT_2*M_1_SQRTPI/currCumNormDist.sigma; currCumNormDist.derNormalizationFactor = currCumNormDist.sigma*currCumNormDist.sigma; currCumNormDist.denominator = 2.0f*currCumNormDist.derNormalizationFactor; } //device function to initialize variable in the black calculator __device__ void initBlackCalcVars(blackCalcStruct& blackCalculator, payoffStruct payoff) { blackCalculator.d1 = log(blackCalculator.forward / blackCalculator.strike)/blackCalculator.stdDev + 0.5f*blackCalculator.stdDev; blackCalculator.d2 = blackCalculator.d1 - blackCalculator.stdDev; //initialize the cumulative normal distribution structure normalDistStruct currCumNormDist; initCumNormDist(currCumNormDist); blackCalculator.cum_d1 = cumNormDistOp(currCumNormDist, blackCalculator.d1); blackCalculator.cum_d2 = cumNormDistOp(currCumNormDist, blackCalculator.d2); blackCalculator.n_d1 = cumNormDistDeriv(currCumNormDist, blackCalculator.d1); blackCalculator.n_d2 = cumNormDistDeriv(currCumNormDist, blackCalculator.d2); blackCalculator.x = payoff.strike; blackCalculator.DxDstrike = 1.0f; // the following one will probably disappear as soon as // super-share will be properly handled blackCalculator.DxDs = 0.0f; // this part is always executed. // in case of plain-vanilla payoffs, it is also the only part // which is executed. 
switch (payoff.type) { case CALL: blackCalculator.alpha = blackCalculator.cum_d1;// N(d1) blackCalculator.DalphaDd1 = blackCalculator.n_d1;// n(d1) blackCalculator.beta = -1.0f*blackCalculator.cum_d2;// -N(d2) blackCalculator.DbetaDd2 = -1.0f*blackCalculator.n_d2;// -n(d2) break; case PUT: blackCalculator.alpha = -1.0f+blackCalculator.cum_d1;// -N(-d1) blackCalculator.DalphaDd1 = blackCalculator.n_d1;// n( d1) blackCalculator.beta = 1.0f-blackCalculator.cum_d2;// N(-d2) blackCalculator.DbetaDd2 = -1.0f* blackCalculator.n_d2;// -n( d2) break; } } //device function to initialize the black calculator __device__ void initBlackCalculator(blackCalcStruct& blackCalc, payoffStruct payoff, float forwardPrice, float stdDev, float riskFreeDiscount) { blackCalc.strike = payoff.strike; blackCalc.forward = forwardPrice; blackCalc.stdDev = stdDev; blackCalc.discount = riskFreeDiscount; blackCalc.variance = stdDev * stdDev; initBlackCalcVars(blackCalc, payoff); } //device function to retrieve the output resulting value __device__ float getResultVal(blackCalcStruct blackCalculator) { float result = blackCalculator.discount * (blackCalculator.forward * blackCalculator.alpha + blackCalculator.x * blackCalculator.beta); return result; } //global function to retrieve the output value for an option __global__ void getOutValOption(optionInputStruct* options, float* outputVals, int numVals) { int optionNum = blockIdx.x * blockDim.x + threadIdx.x; //check if within current options if (optionNum < numVals) { optionInputStruct threadOption = options[optionNum]; payoffStruct currPayoff; currPayoff.type = threadOption.type; currPayoff.strike = threadOption.strike; yieldTermStruct qTS; qTS.timeYearFraction = threadOption.t; qTS.forward = threadOption.q; yieldTermStruct rTS; rTS.timeYearFraction = threadOption.t; rTS.forward = threadOption.r; blackVolStruct volTS; volTS.timeYearFraction = threadOption.t; volTS.volatility = threadOption.vol; blackScholesMertStruct stochProcess; stochProcess.x0 = threadOption.spot; stochProcess.dividendTS = qTS; stochProcess.riskFreeTS = rTS; stochProcess.blackVolTS = volTS; optionStruct currOption; currOption.payoff = currPayoff; currOption.yearFractionTime = threadOption.t; currOption.pricingEngine = stochProcess; float variance = getBlackVolBlackVar(currOption.pricingEngine.blackVolTS); float dividendDiscount = getDiscountOnDividendYield(currOption.yearFractionTime, currOption.pricingEngine.dividendTS); float riskFreeDiscount = getDiscountOnRiskFreeRate(currOption.yearFractionTime, currOption.pricingEngine.riskFreeTS); float spot = currOption.pricingEngine.x0; float forwardPrice = spot * dividendDiscount / riskFreeDiscount; //declare the blackCalcStruct blackCalcStruct blackCalc; //initialize the calculator initBlackCalculator(blackCalc, currOption.payoff, forwardPrice, sqrt(variance), riskFreeDiscount); //retrieve the results values float resultVal = getResultVal(blackCalc); //write the resulting value to global memory outputVals[optionNum] = resultVal; } } #endif //BLACK_SCHOLES_ANALYTIC_ENGINE_KERNELS_CU
5d0ee1f26887e704e23f0cb656f996a447fe2de1.cu
//blackScholesAnalyticEngineKernels.cu //Scott Grauer-Gray //Kernels for running black scholes using the analytic engine #ifndef BLACK_SCHOLES_ANALYTIC_ENGINE_KERNELS_CU #define BLACK_SCHOLES_ANALYTIC_ENGINE_KERNELS_CU //declarations for the kernels #include "blackScholesAnalyticEngineKernels.cuh" //needed for the constants in the error function #include "errorFunctConsts.cuh" //device kernel to retrieve the compound factor in interestRate __device__ float interestRateCompoundFactor(float t, yieldTermStruct currYieldTermStruct) { return (exp((currYieldTermStruct.forward)*t)); } //device kernel to retrieve the discount factor in interestRate __device__ float interestRateDiscountFactor(float t, yieldTermStruct currYieldTermStruct) { return 1.0f / interestRateCompoundFactor(t, currYieldTermStruct); } //device function to get the variance of the black volatility function __device__ float getBlackVolBlackVar(blackVolStruct volTS) { float vol = volTS.volatility; return vol*vol*volTS.timeYearFraction; } //device function to get the discount on a dividend yield __device__ float getDiscountOnDividendYield(float yearFraction, yieldTermStruct dividendYieldTermStruct) { float intDiscountFactor = interestRateDiscountFactor(yearFraction, dividendYieldTermStruct); return intDiscountFactor; } //device function to get the discount on the risk free rate __device__ float getDiscountOnRiskFreeRate(float yearFraction, yieldTermStruct riskFreeRateYieldTermStruct) { return interestRateDiscountFactor(yearFraction, riskFreeRateYieldTermStruct); } //device kernel to run the error function __device__ float errorFunct(normalDistStruct normDist, float x) { float R,S,P,Q,s,y,z,r, ax; ax = fabsf(x); if(ax < 0.84375f) { if(ax < 3.7252902984e-09f) { if (ax < FLT_MIN*16.0f) return 0.125f*(8.0f*x+ (ERROR_FUNCT_efx8)*x); /*avoid underflow */ return x + (ERROR_FUNCT_efx)*x; } z = x*x; r = ERROR_FUNCT_pp0+z*(ERROR_FUNCT_pp1+z*(ERROR_FUNCT_pp2+z*(ERROR_FUNCT_pp3+z*ERROR_FUNCT_pp4))); s = ERROR_FUNCT_one+z*(ERROR_FUNCT_qq1+z*(ERROR_FUNCT_qq2+z*(ERROR_FUNCT_qq3+z*(ERROR_FUNCT_qq4+z*ERROR_FUNCT_qq5)))); y = r/s; return x + x*y; } if(ax <1.25f) { s = ax-ERROR_FUNCT_one; P = ERROR_FUNCT_pa0+s*(ERROR_FUNCT_pa1+s*(ERROR_FUNCT_pa2+s*(ERROR_FUNCT_pa3+s*(ERROR_FUNCT_pa4+s*(ERROR_FUNCT_pa5+s*ERROR_FUNCT_pa6))))); Q = ERROR_FUNCT_one+s*(ERROR_FUNCT_qa1+s*(ERROR_FUNCT_qa2+s*(ERROR_FUNCT_qa3+s*(ERROR_FUNCT_qa4+s*(ERROR_FUNCT_qa5+s*ERROR_FUNCT_qa6))))); if(x>=0.0f) return ERROR_FUNCT_erx + P/Q; else return -1.0f*ERROR_FUNCT_erx - P/Q; } if (ax >= 6.0f) { if(x>=0.0f) return ERROR_FUNCT_one-ERROR_FUNCT_tiny; else return ERROR_FUNCT_tiny-ERROR_FUNCT_one; } /* Starts to lose accuracy when ax~5 */ s = ERROR_FUNCT_one/(ax*ax); if(ax < 2.85714285714285f) { /* |x| < 1/0.35 */ R = ERROR_FUNCT_ra0+s*(ERROR_FUNCT_ra1+s*(ERROR_FUNCT_ra2+s*(ERROR_FUNCT_ra3+s*(ERROR_FUNCT_ra4+s*(ERROR_FUNCT_ra5+s*(ERROR_FUNCT_ra6+s*ERROR_FUNCT_ra7)))))); S = ERROR_FUNCT_one+s*(ERROR_FUNCT_sa1+s*(ERROR_FUNCT_sa2+s*(ERROR_FUNCT_sa3+s*(ERROR_FUNCT_sa4+s*(ERROR_FUNCT_sa5+s*(ERROR_FUNCT_sa6+s*(ERROR_FUNCT_sa7+s*ERROR_FUNCT_sa8))))))); } else { /* |x| >= 1/0.35 */ R=ERROR_FUNCT_rb0+s*(ERROR_FUNCT_rb1+s*(ERROR_FUNCT_rb2+s*(ERROR_FUNCT_rb3+s*(ERROR_FUNCT_rb4+s*(ERROR_FUNCT_rb5+s*ERROR_FUNCT_rb6))))); S=ERROR_FUNCT_one+s*(ERROR_FUNCT_sb1+s*(ERROR_FUNCT_sb2+s*(ERROR_FUNCT_sb3+s*(ERROR_FUNCT_sb4+s*(ERROR_FUNCT_sb5+s*(ERROR_FUNCT_sb6+s*ERROR_FUNCT_sb7)))))); } r = expf( -ax*ax-0.5625f +R/S); if(x>=0.0f) return ERROR_FUNCT_one-r/ax; else return r/ax-ERROR_FUNCT_one; } //device kernel 
to run the operator function in cumulative normal distribution __device__ float cumNormDistOp(normalDistStruct normDist, float z) { z = (z - normDist.average) / normDist.sigma; float result = 0.5f * ( 1.0f + errorFunct(normDist, z*M_SQRT_2 ) ); return result; } //device kernel to run the gaussian function in the normal distribution __device__ float gaussianFunctNormDist(normalDistStruct normDist, float x) { float deltax = x - normDist.average; float exponent = -(deltax*deltax)/normDist.denominator; // debian alpha had some strange problem in the very-low range return exponent <= -690.0f ? 0.0f : // exp(x) < 1.0e-300 anyway normDist.normalizationFactor * expf(exponent); } //device kernel to retrieve the derivative in a cumulative normal distribution __device__ float cumNormDistDeriv(normalDistStruct normDist, float x) { float xn = (x - normDist.average) / normDist.sigma; return gaussianFunctNormDist(normDist, xn) / normDist.sigma; } //device function to initialize the cumulative normal distribution structure __device__ void initCumNormDist(normalDistStruct& currCumNormDist) { currCumNormDist.average = 0.0f; currCumNormDist.sigma = 1.0f; currCumNormDist.normalizationFactor = M_SQRT_2*M_1_SQRTPI/currCumNormDist.sigma; currCumNormDist.derNormalizationFactor = currCumNormDist.sigma*currCumNormDist.sigma; currCumNormDist.denominator = 2.0f*currCumNormDist.derNormalizationFactor; } //device function to initialize variable in the black calculator __device__ void initBlackCalcVars(blackCalcStruct& blackCalculator, payoffStruct payoff) { blackCalculator.d1 = log(blackCalculator.forward / blackCalculator.strike)/blackCalculator.stdDev + 0.5f*blackCalculator.stdDev; blackCalculator.d2 = blackCalculator.d1 - blackCalculator.stdDev; //initialize the cumulative normal distribution structure normalDistStruct currCumNormDist; initCumNormDist(currCumNormDist); blackCalculator.cum_d1 = cumNormDistOp(currCumNormDist, blackCalculator.d1); blackCalculator.cum_d2 = cumNormDistOp(currCumNormDist, blackCalculator.d2); blackCalculator.n_d1 = cumNormDistDeriv(currCumNormDist, blackCalculator.d1); blackCalculator.n_d2 = cumNormDistDeriv(currCumNormDist, blackCalculator.d2); blackCalculator.x = payoff.strike; blackCalculator.DxDstrike = 1.0f; // the following one will probably disappear as soon as // super-share will be properly handled blackCalculator.DxDs = 0.0f; // this part is always executed. // in case of plain-vanilla payoffs, it is also the only part // which is executed. 
switch (payoff.type) { case CALL: blackCalculator.alpha = blackCalculator.cum_d1;// N(d1) blackCalculator.DalphaDd1 = blackCalculator.n_d1;// n(d1) blackCalculator.beta = -1.0f*blackCalculator.cum_d2;// -N(d2) blackCalculator.DbetaDd2 = -1.0f*blackCalculator.n_d2;// -n(d2) break; case PUT: blackCalculator.alpha = -1.0f+blackCalculator.cum_d1;// -N(-d1) blackCalculator.DalphaDd1 = blackCalculator.n_d1;// n( d1) blackCalculator.beta = 1.0f-blackCalculator.cum_d2;// N(-d2) blackCalculator.DbetaDd2 = -1.0f* blackCalculator.n_d2;// -n( d2) break; } } //device function to initialize the black calculator __device__ void initBlackCalculator(blackCalcStruct& blackCalc, payoffStruct payoff, float forwardPrice, float stdDev, float riskFreeDiscount) { blackCalc.strike = payoff.strike; blackCalc.forward = forwardPrice; blackCalc.stdDev = stdDev; blackCalc.discount = riskFreeDiscount; blackCalc.variance = stdDev * stdDev; initBlackCalcVars(blackCalc, payoff); } //device function to retrieve the output resulting value __device__ float getResultVal(blackCalcStruct blackCalculator) { float result = blackCalculator.discount * (blackCalculator.forward * blackCalculator.alpha + blackCalculator.x * blackCalculator.beta); return result; } //global function to retrieve the output value for an option __global__ void getOutValOption(optionInputStruct* options, float* outputVals, int numVals) { int optionNum = blockIdx.x * blockDim.x + threadIdx.x; //check if within current options if (optionNum < numVals) { optionInputStruct threadOption = options[optionNum]; payoffStruct currPayoff; currPayoff.type = threadOption.type; currPayoff.strike = threadOption.strike; yieldTermStruct qTS; qTS.timeYearFraction = threadOption.t; qTS.forward = threadOption.q; yieldTermStruct rTS; rTS.timeYearFraction = threadOption.t; rTS.forward = threadOption.r; blackVolStruct volTS; volTS.timeYearFraction = threadOption.t; volTS.volatility = threadOption.vol; blackScholesMertStruct stochProcess; stochProcess.x0 = threadOption.spot; stochProcess.dividendTS = qTS; stochProcess.riskFreeTS = rTS; stochProcess.blackVolTS = volTS; optionStruct currOption; currOption.payoff = currPayoff; currOption.yearFractionTime = threadOption.t; currOption.pricingEngine = stochProcess; float variance = getBlackVolBlackVar(currOption.pricingEngine.blackVolTS); float dividendDiscount = getDiscountOnDividendYield(currOption.yearFractionTime, currOption.pricingEngine.dividendTS); float riskFreeDiscount = getDiscountOnRiskFreeRate(currOption.yearFractionTime, currOption.pricingEngine.riskFreeTS); float spot = currOption.pricingEngine.x0; float forwardPrice = spot * dividendDiscount / riskFreeDiscount; //declare the blackCalcStruct blackCalcStruct blackCalc; //initialize the calculator initBlackCalculator(blackCalc, currOption.payoff, forwardPrice, sqrt(variance), riskFreeDiscount); //retrieve the results values float resultVal = getResultVal(blackCalc); //write the resulting value to global memory outputVals[optionNum] = resultVal; } } #endif //BLACK_SCHOLES_ANALYTIC_ENGINE_KERNELS_CU
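The getOutValOption kernel above is independent per option, so a host driver only needs to stage the option array and choose a 1-D launch configuration. A minimal sketch, assuming the struct definitions from blackScholesAnalyticEngineKernels.cuh; the helper name priceOptionsOnGpu and the block size are hypothetical, not part of the original code.

// Hedged sketch of a host-side driver for getOutValOption. The struct layout comes from
// blackScholesAnalyticEngineKernels.cuh; the helper name and numVals are hypothetical.
#include <cuda_runtime.h>
#include "blackScholesAnalyticEngineKernels.cuh"

void priceOptionsOnGpu(const optionInputStruct* hostOptions, float* hostResults, int numVals)
{
    optionInputStruct* devOptions;
    float* devResults;
    cudaMalloc((void**)&devOptions, numVals * sizeof(optionInputStruct));
    cudaMalloc((void**)&devResults, numVals * sizeof(float));
    cudaMemcpy(devOptions, hostOptions, numVals * sizeof(optionInputStruct), cudaMemcpyHostToDevice);

    int threadsPerBlock = 256;                                        // one thread per option
    int blocks = (numVals + threadsPerBlock - 1) / threadsPerBlock;
    getOutValOption<<<blocks, threadsPerBlock>>>(devOptions, devResults, numVals);

    cudaMemcpy(hostResults, devResults, numVals * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(devOptions);
    cudaFree(devResults);
}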
5a679d11ff185171e9f2395cdf1d78d1eed45428.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* * JCudaVec - Vector operations for JCuda * http://www.jcuda.org * * Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org */ extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. 
extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision floating-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision floating-point remainder. extern "C" __global__ void vec_asinhf (size_t n, float *result, float *x) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { result[id] = asinhf(x[id]); } }
5a679d11ff185171e9f2395cdf1d78d1eed45428.cu
#include "includes.h" /* * JCudaVec - Vector operations for JCuda * http://www.jcuda.org * * Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org */ extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. 
extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument × p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision floating-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision floating-point remainder. extern "C" __global__ void vec_asinhf (size_t n, float *result, float *x) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { result[id] = asinhf(x[id]); } }
5091acb00c2a50c11d2f75e751f815785d75f9b2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include <cfloat> #include "caffe/fast_rcnn_layers.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int aligned_height, const int aligned_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output int pw = index % aligned_width; int ph = (index / aligned_width) % aligned_height; int c = (index / aligned_width / aligned_height) % channels; int n = index / aligned_width / aligned_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = bottom_rois[3] * spatial_scale; Dtype roi_end_h = bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 1.0); Dtype roi_height = max(roi_end_h - roi_start_h, 1.0); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(aligned_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(aligned_width); // Add roi offsets and clip to input boundaries const Dtype x = (static_cast<Dtype>(pw) + 0.5) * bin_size_w - 0.5; const Dtype y = (static_cast<Dtype>(ph) + 0.5) * bin_size_h - 0.5; const int x0 = static_cast<int>(x); const int y0 = static_cast<int>(y); const int x1 = min(x0 + 1, width); const int y1 = min(y0 + 1, height); Dtype u = x - static_cast<Dtype>(x0); Dtype v = y - static_cast<Dtype>(y0); // Define an empty align region to be zero bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype p00 = bottom_data[y0 * width + x0]; Dtype p01 = bottom_data[y1 * width + x0]; Dtype p10 = bottom_data[y0 * width + x1]; Dtype p11 = bottom_data[y1 * width + x1]; top_data[index] = (1-v)*((1-u)*p00 + u*p10) + v*((1-u)*p01 + u*p11); } } template <typename Dtype> void ROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ROIAlignForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, spatial_scale_, channels_, height_, width_, aligned_height_, aligned_width_, bottom_rois, top_data, argmax_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ROIAlignBackward(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int aligned_height, const int aligned_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / 
height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that aligned this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } Dtype roi_start_w = offset_bottom_rois[1] * spatial_scale; Dtype roi_start_h = offset_bottom_rois[2] * spatial_scale; Dtype roi_end_w = offset_bottom_rois[3] * spatial_scale; Dtype roi_end_h = offset_bottom_rois[4] * spatial_scale; // Skip if ROI doesn't include (h, w), consider 1 px outside. const bool in_roi = (w >= (roi_start_w + 1) && w <= (roi_end_w - 1) && h >= (roi_start_h + 1) && h <= (roi_end_h - 1)); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * aligned_height * aligned_width; const Dtype* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of aligned units that could have aligned // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w, 1.0); int roi_height = max(roi_end_h - roi_start_h, 1.0); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(aligned_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(aligned_width); const Dtype x = (static_cast<Dtype>(aligned_width) + 0.5) * bin_size_w - 0.5; const Dtype y = (static_cast<Dtype>(aligned_width) + 0.5) * bin_size_h - 0.5; const int x0 = static_cast<int>(x); const int y0 = static_cast<int>(y); const int x1 = min(x0 + 1, width); const int y1 = min(y0 + 1, height); Dtype u = x - static_cast<Dtype>(x0); Dtype v = y - static_cast<Dtype>(y0); if(x0 == w && y0 == h){ gradient += (1-v) * (1-u) * offset_top_diff[h * width + w]; } else if(x1 == w && y0 == h){ gradient += (1-v) * u * offset_top_diff[h * width + w]; } else if(x0 == w && y1 == h){ gradient += (1-u) * v * offset_top_diff[h * width + w]; } else if(x1 == w && y1 == h){ gradient += v * u * offset_top_diff[h * width + w]; } } bottom_diff[index] = gradient; } } template <typename Dtype> void ROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* argmax_data = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ROIAlignBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_, height_, width_, aligned_height_, aligned_width_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ROIAlignLayer); } // namespace caffe
5091acb00c2a50c11d2f75e751f815785d75f9b2.cu
// ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include <cfloat> #include "caffe/fast_rcnn_layers.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int aligned_height, const int aligned_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output int pw = index % aligned_width; int ph = (index / aligned_width) % aligned_height; int c = (index / aligned_width / aligned_height) % channels; int n = index / aligned_width / aligned_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = bottom_rois[3] * spatial_scale; Dtype roi_end_h = bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 1.0); Dtype roi_height = max(roi_end_h - roi_start_h, 1.0); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(aligned_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(aligned_width); // Add roi offsets and clip to input boundaries const Dtype x = (static_cast<Dtype>(pw) + 0.5) * bin_size_w - 0.5; const Dtype y = (static_cast<Dtype>(ph) + 0.5) * bin_size_h - 0.5; const int x0 = static_cast<int>(x); const int y0 = static_cast<int>(y); const int x1 = min(x0 + 1, width); const int y1 = min(y0 + 1, height); Dtype u = x - static_cast<Dtype>(x0); Dtype v = y - static_cast<Dtype>(y0); // Define an empty align region to be zero bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype p00 = bottom_data[y0 * width + x0]; Dtype p01 = bottom_data[y1 * width + x0]; Dtype p10 = bottom_data[y0 * width + x1]; Dtype p11 = bottom_data[y1 * width + x1]; top_data[index] = (1-v)*((1-u)*p00 + u*p10) + v*((1-u)*p01 + u*p11); } } template <typename Dtype> void ROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) ROIAlignForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, spatial_scale_, channels_, height_, width_, aligned_height_, aligned_width_, bottom_rois, top_data, argmax_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ROIAlignBackward(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int aligned_height, const int aligned_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that 
aligned this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } Dtype roi_start_w = offset_bottom_rois[1] * spatial_scale; Dtype roi_start_h = offset_bottom_rois[2] * spatial_scale; Dtype roi_end_w = offset_bottom_rois[3] * spatial_scale; Dtype roi_end_h = offset_bottom_rois[4] * spatial_scale; // Skip if ROI doesn't include (h, w), consider 1 px outside. const bool in_roi = (w >= (roi_start_w + 1) && w <= (roi_end_w - 1) && h >= (roi_start_h + 1) && h <= (roi_end_h - 1)); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * aligned_height * aligned_width; const Dtype* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of aligned units that could have aligned // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w, 1.0); int roi_height = max(roi_end_h - roi_start_h, 1.0); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(aligned_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(aligned_width); const Dtype x = (static_cast<Dtype>(aligned_width) + 0.5) * bin_size_w - 0.5; const Dtype y = (static_cast<Dtype>(aligned_width) + 0.5) * bin_size_h - 0.5; const int x0 = static_cast<int>(x); const int y0 = static_cast<int>(y); const int x1 = min(x0 + 1, width); const int y1 = min(y0 + 1, height); Dtype u = x - static_cast<Dtype>(x0); Dtype v = y - static_cast<Dtype>(y0); if(x0 == w && y0 == h){ gradient += (1-v) * (1-u) * offset_top_diff[h * width + w]; } else if(x1 == w && y0 == h){ gradient += (1-v) * u * offset_top_diff[h * width + w]; } else if(x0 == w && y1 == h){ gradient += (1-u) * v * offset_top_diff[h * width + w]; } else if(x1 == w && y1 == h){ gradient += v * u * offset_top_diff[h * width + w]; } } bottom_diff[index] = gradient; } } template <typename Dtype> void ROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* argmax_data = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) ROIAlignBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_, height_, width_, aligned_height_, aligned_width_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ROIAlignLayer); } // namespace caffe
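The core of ROIAlignForward above is one bilinear sample per output element. A plain CPU reference of that sample is useful when unit-testing the kernel; a sketch for a single channel, mirroring the p00/p01/p10/p11 weighting used above (the function name is hypothetical).

// CPU reference for the bilinear sample in ROIAlignForward (one channel, row-major data).
// Note: the kernel above clamps x1/y1 with min(x0 + 1, width) and min(y0 + 1, height);
// this reference clamps to the last valid index instead.
#include <algorithm>

float bilinearSample(const float* data, int height, int width, float x, float y)
{
    int x0 = static_cast<int>(x);
    int y0 = static_cast<int>(y);
    int x1 = std::min(x0 + 1, width - 1);
    int y1 = std::min(y0 + 1, height - 1);
    float u = x - x0;
    float v = y - y0;
    float p00 = data[y0 * width + x0];
    float p10 = data[y0 * width + x1];
    float p01 = data[y1 * width + x0];
    float p11 = data[y1 * width + x1];
    return (1 - v) * ((1 - u) * p00 + u * p10) + v * ((1 - u) * p01 + u * p11);
}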
5d42eaf0e62da956ddb7ad0e83693f1f8acd33cd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This program executes a typical convolutional layer in regular CNNs #include <iostream> #include "cnnConvLayer.h" using namespace std; // This is the CPU version, please don't modify it void convLayerCPU() { // declarations for bunch of indexing parameters int fn, sli, fmy, fmx, y, x; int sum, ifmy, ifmx, ofmy, ofmx; int filtIdx, inNeuIdx, outNeuIdx, outIdx; int filtVol = FMDEPTH * FILTSIZE * FILTSIZE; int filtArea = FILTSIZE * FILTSIZE; int fmArea = FMSIZE *FMSIZE; int outArea = FMSIZE/2 * FMSIZE/2; // Convolution for(fn = 0; fn < FILTNUM; fn++){ for(fmy = 0; fmy < FMSIZE; fmy += STRIDE){ for(fmx = 0; fmx < FMSIZE; fmx += STRIDE){ sum = 0; for(sli = 0; sli < FMDEPTH; sli++){ for(y = 0; y < FILTSIZE; y++){ for(x = 0; x < FILTSIZE; x++){ ifmy = fmy - FILTSIZE / 2 + y; ifmx = fmx - FILTSIZE / 2 + x; filtIdx = fn*filtVol + sli*filtArea + y*FILTSIZE + x; inNeuIdx = sli*fmArea + ifmy*FMSIZE + ifmx; if(ifmy >= 0 && ifmy < FMSIZE && ifmx >= 0 && ifmx < FMSIZE) sum += filt[filtIdx] * inNeu[inNeuIdx]; } } } // Activation - ReLU outNeuIdx = fn*fmArea + fmy*FMSIZE + fmx; if(sum <= 0) outNeu[outNeuIdx] = 0; else outNeu[outNeuIdx] = sum; } } } // Max Pooling with Window Size 2x2 int max, tmpVal; for(sli = 0; sli < FILTNUM; sli++){ for(fmy = 0; fmy < FMSIZE/2 ; fmy += 1){ for(fmx = 0; fmx < FMSIZE/2 ; fmx += 1){ outNeuIdx = sli*fmArea + fmy*2*FMSIZE + fmx*2; max = outNeu[outNeuIdx]; for(y = 0; y < 2; y++){ for(x = 0; x < 2; x++){ ofmy = fmy*2 + y; ofmx = fmx*2 + x; outNeuIdx = sli*fmArea + ofmy*FMSIZE + ofmx; tmpVal = outNeu[outNeuIdx]; if(tmpVal > max) max = tmpVal; } } outIdx = sli*outArea + fmy*FMSIZE/2 + fmx; outCPU[outIdx] = max; } } } } /*** Implement your CUDA Kernel here ***/ short *devInputA, *devInputB; int *convOut; int *devOut; void initGPU() { hipMalloc(&devInputA, sizeof(short)* FMDEPTH * FMSIZE * FMSIZE ); hipMalloc(&devInputB, sizeof(short)*FILTSIZE*FILTSIZE*FMDEPTH*FILTNUM ); hipMalloc(&convOut, sizeof(int)* FMSIZE * FMSIZE *FILTNUM ); hipMalloc(&devOut, sizeof(int)* FMSIZE * FMSIZE *FILTNUM /4 ); hipMemcpy(devInputA, inNeu, sizeof(short)* FMDEPTH * FMSIZE * FMSIZE, hipMemcpyHostToDevice); hipMemcpy(devInputB, filt, sizeof(short)*FILTSIZE*FILTSIZE*FMDEPTH*FILTNUM, hipMemcpyHostToDevice); } __global__ void convLayerGPU(short *inNeu , short *filt , int *outNeu) { int fn, sli, fmy, fmx, y, x; int sum, ifmy, ifmx; int filtIdx, inNeuIdx, outNeuIdx; int filtVol = FMDEPTH * FILTSIZE * FILTSIZE; int filtArea = FILTSIZE * FILTSIZE; int fmArea = FMSIZE *FMSIZE; fmx=threadIdx.x; fmy=threadIdx.y; fn=blockIdx.x; // Convolution sum = 0; for(sli = 0; sli < FMDEPTH; sli++){ for(y = 0; y < FILTSIZE; y++){ for(x = 0; x < FILTSIZE; x++){ ifmx = fmx - FILTSIZE / 2 + x; ifmy = fmy - FILTSIZE / 2 + y; filtIdx = fn*filtVol + sli*filtArea + y*FILTSIZE + x; inNeuIdx = sli*fmArea + ifmy*FMSIZE + ifmx; if(ifmy >= 0 && ifmy < FMSIZE && ifmx >= 0 && ifmx < FMSIZE) sum += filt[filtIdx] * inNeu[inNeuIdx]; } } } // Activation - ReLU outNeuIdx = fn*fmArea + fmy*FMSIZE + fmx; if(sum <= 0) outNeu[outNeuIdx] = 0; else outNeu[outNeuIdx] = sum; } __global__ void poolLayerGPU(int *inNeu , int *outNeu) { int Tx = threadIdx.x; int Ty = threadIdx.y; int block = blockIdx.x; int inNeu_ID = block*32*32 + Ty*32*2 + Tx*2 ; int Max = inNeu[inNeu_ID]; if( Max < inNeu[inNeu_ID+1] ) Max = inNeu[inNeu_ID+1]; else Max = Max; __syncthreads(); if( Max < inNeu[inNeu_ID+32] ) Max = inNeu[inNeu_ID+32]; else Max = Max; __syncthreads(); if( Max < 
inNeu[inNeu_ID+33] ) Max = inNeu[inNeu_ID+33]; else Max = Max; __syncthreads(); outNeu[block*16*16 + Ty*16 + Tx] = Max; } /*** Implement your CUDA Kernel here ***/ int main() { int convLayerCPUExecTime, convLayerGPUExecTime; init(); timespec time_begin, time_end; clock_gettime(CLOCK_REALTIME, &time_begin); convLayerCPU(); clock_gettime(CLOCK_REALTIME, &time_end); convLayerCPUExecTime = timespec_diff_us(time_begin, time_end); cout << "CPU time for executing a typical convolutional layer = " << convLayerCPUExecTime / 1000 << "ms" << endl; /*** Launch your CUDA Kernel here ***/ initGPU(); clock_gettime(CLOCK_REALTIME, &time_begin); hipLaunchKernelGGL(( convLayerGPU), dim3(512), dim3(32,32), 0, 0, devInputA,devInputB,convOut); // Launch the kernel hipDeviceSynchronize(); // Do synchronization before clock_gettime() dim3 threadsPerBlock_pool(FMSIZE/2,FMSIZE/2); dim3 numBlocks_pool(FMDEPTH,1); hipLaunchKernelGGL(( poolLayerGPU), dim3(numBlocks_pool),dim3(threadsPerBlock_pool), 0, 0, convOut,devOut); hipDeviceSynchronize(); // Do synchronization before clock_gettime() hipMemcpy(outGPU, devOut , sizeof(int) * FMSIZE * FMSIZE *FILTNUM /4, hipMemcpyDeviceToHost); hipFree(devInputA); hipFree(devInputB); hipFree(convOut); hipFree(devOut); /*** Launch your CUDA Kernel here ***/ clock_gettime(CLOCK_REALTIME, &time_end); convLayerGPUExecTime = timespec_diff_us(time_begin, time_end); cout << "GPU time for executing a typical convolutional layer = " << convLayerGPUExecTime / 1000 << "ms" << endl; if(checker()){ cout << "Congratulations! You pass the check." << endl; cout << "Speedup: " << (float)convLayerCPUExecTime / convLayerGPUExecTime << endl; } else cout << "Sorry! Your result is wrong." << endl; ending(); return 0; }
5d42eaf0e62da956ddb7ad0e83693f1f8acd33cd.cu
// This program executes a typical convolutional layer in regular CNNs #include <iostream> #include "cnnConvLayer.h" using namespace std; // This is the CPU version, please don't modify it void convLayerCPU() { // declarations for bunch of indexing parameters int fn, sli, fmy, fmx, y, x; int sum, ifmy, ifmx, ofmy, ofmx; int filtIdx, inNeuIdx, outNeuIdx, outIdx; int filtVol = FMDEPTH * FILTSIZE * FILTSIZE; int filtArea = FILTSIZE * FILTSIZE; int fmArea = FMSIZE *FMSIZE; int outArea = FMSIZE/2 * FMSIZE/2; // Convolution for(fn = 0; fn < FILTNUM; fn++){ for(fmy = 0; fmy < FMSIZE; fmy += STRIDE){ for(fmx = 0; fmx < FMSIZE; fmx += STRIDE){ sum = 0; for(sli = 0; sli < FMDEPTH; sli++){ for(y = 0; y < FILTSIZE; y++){ for(x = 0; x < FILTSIZE; x++){ ifmy = fmy - FILTSIZE / 2 + y; ifmx = fmx - FILTSIZE / 2 + x; filtIdx = fn*filtVol + sli*filtArea + y*FILTSIZE + x; inNeuIdx = sli*fmArea + ifmy*FMSIZE + ifmx; if(ifmy >= 0 && ifmy < FMSIZE && ifmx >= 0 && ifmx < FMSIZE) sum += filt[filtIdx] * inNeu[inNeuIdx]; } } } // Activation - ReLU outNeuIdx = fn*fmArea + fmy*FMSIZE + fmx; if(sum <= 0) outNeu[outNeuIdx] = 0; else outNeu[outNeuIdx] = sum; } } } // Max Pooling with Window Size 2x2 int max, tmpVal; for(sli = 0; sli < FILTNUM; sli++){ for(fmy = 0; fmy < FMSIZE/2 ; fmy += 1){ for(fmx = 0; fmx < FMSIZE/2 ; fmx += 1){ outNeuIdx = sli*fmArea + fmy*2*FMSIZE + fmx*2; max = outNeu[outNeuIdx]; for(y = 0; y < 2; y++){ for(x = 0; x < 2; x++){ ofmy = fmy*2 + y; ofmx = fmx*2 + x; outNeuIdx = sli*fmArea + ofmy*FMSIZE + ofmx; tmpVal = outNeu[outNeuIdx]; if(tmpVal > max) max = tmpVal; } } outIdx = sli*outArea + fmy*FMSIZE/2 + fmx; outCPU[outIdx] = max; } } } } /*** Implement your CUDA Kernel here ***/ short *devInputA, *devInputB; int *convOut; int *devOut; void initGPU() { cudaMalloc(&devInputA, sizeof(short)* FMDEPTH * FMSIZE * FMSIZE ); cudaMalloc(&devInputB, sizeof(short)*FILTSIZE*FILTSIZE*FMDEPTH*FILTNUM ); cudaMalloc(&convOut, sizeof(int)* FMSIZE * FMSIZE *FILTNUM ); cudaMalloc(&devOut, sizeof(int)* FMSIZE * FMSIZE *FILTNUM /4 ); cudaMemcpy(devInputA, inNeu, sizeof(short)* FMDEPTH * FMSIZE * FMSIZE, cudaMemcpyHostToDevice); cudaMemcpy(devInputB, filt, sizeof(short)*FILTSIZE*FILTSIZE*FMDEPTH*FILTNUM, cudaMemcpyHostToDevice); } __global__ void convLayerGPU(short *inNeu , short *filt , int *outNeu) { int fn, sli, fmy, fmx, y, x; int sum, ifmy, ifmx; int filtIdx, inNeuIdx, outNeuIdx; int filtVol = FMDEPTH * FILTSIZE * FILTSIZE; int filtArea = FILTSIZE * FILTSIZE; int fmArea = FMSIZE *FMSIZE; fmx=threadIdx.x; fmy=threadIdx.y; fn=blockIdx.x; // Convolution sum = 0; for(sli = 0; sli < FMDEPTH; sli++){ for(y = 0; y < FILTSIZE; y++){ for(x = 0; x < FILTSIZE; x++){ ifmx = fmx - FILTSIZE / 2 + x; ifmy = fmy - FILTSIZE / 2 + y; filtIdx = fn*filtVol + sli*filtArea + y*FILTSIZE + x; inNeuIdx = sli*fmArea + ifmy*FMSIZE + ifmx; if(ifmy >= 0 && ifmy < FMSIZE && ifmx >= 0 && ifmx < FMSIZE) sum += filt[filtIdx] * inNeu[inNeuIdx]; } } } // Activation - ReLU outNeuIdx = fn*fmArea + fmy*FMSIZE + fmx; if(sum <= 0) outNeu[outNeuIdx] = 0; else outNeu[outNeuIdx] = sum; } __global__ void poolLayerGPU(int *inNeu , int *outNeu) { int Tx = threadIdx.x; int Ty = threadIdx.y; int block = blockIdx.x; int inNeu_ID = block*32*32 + Ty*32*2 + Tx*2 ; int Max = inNeu[inNeu_ID]; if( Max < inNeu[inNeu_ID+1] ) Max = inNeu[inNeu_ID+1]; else Max = Max; __syncthreads(); if( Max < inNeu[inNeu_ID+32] ) Max = inNeu[inNeu_ID+32]; else Max = Max; __syncthreads(); if( Max < inNeu[inNeu_ID+33] ) Max = inNeu[inNeu_ID+33]; else Max = Max; __syncthreads(); 
outNeu[block*16*16 + Ty*16 + Tx] = Max; } /*** Implement your CUDA Kernel here ***/ int main() { int convLayerCPUExecTime, convLayerGPUExecTime; init(); timespec time_begin, time_end; clock_gettime(CLOCK_REALTIME, &time_begin); convLayerCPU(); clock_gettime(CLOCK_REALTIME, &time_end); convLayerCPUExecTime = timespec_diff_us(time_begin, time_end); cout << "CPU time for executing a typical convolutional layer = " << convLayerCPUExecTime / 1000 << "ms" << endl; /*** Launch your CUDA Kernel here ***/ initGPU(); clock_gettime(CLOCK_REALTIME, &time_begin); convLayerGPU<<<512,dim3(32,32)>>>(devInputA,devInputB,convOut); // Launch the kernel cudaDeviceSynchronize(); // Do synchronization before clock_gettime() dim3 threadsPerBlock_pool(FMSIZE/2,FMSIZE/2); dim3 numBlocks_pool(FMDEPTH,1); poolLayerGPU<<<numBlocks_pool,threadsPerBlock_pool>>>(convOut,devOut); cudaDeviceSynchronize(); // Do synchronization before clock_gettime() cudaMemcpy(outGPU, devOut , sizeof(int) * FMSIZE * FMSIZE *FILTNUM /4, cudaMemcpyDeviceToHost); cudaFree(devInputA); cudaFree(devInputB); cudaFree(convOut); cudaFree(devOut); /*** Launch your CUDA Kernel here ***/ clock_gettime(CLOCK_REALTIME, &time_end); convLayerGPUExecTime = timespec_diff_us(time_begin, time_end); cout << "GPU time for executing a typical convolutional layer = " << convLayerGPUExecTime / 1000 << "ms" << endl; if(checker()){ cout << "Congratulations! You pass the check." << endl; cout << "Speedup: " << (float)convLayerCPUExecTime / convLayerGPUExecTime << endl; } else cout << "Sorry! Your result is wrong." << endl; ending(); return 0; }
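poolLayerGPU above hard-codes the 32/16 strides of its index arithmetic, which only matches one feature-map size. A version that derives the same 2x2 max-pooling indices from FMSIZE is sketched below; it relies on the constants from cnnConvLayer.h and is an illustration, not a drop-in replacement for the kernel timed above.

// Hedged sketch: 2x2 max pooling with strides derived from FMSIZE (from cnnConvLayer.h)
// instead of the hard-coded 32/16 constants used in poolLayerGPU above.
__global__ void poolLayerGPUGeneric(int *inNeu, int *outNeu)
{
    int tx = threadIdx.x;                 // output column, 0 .. FMSIZE/2 - 1
    int ty = threadIdx.y;                 // output row,    0 .. FMSIZE/2 - 1
    int slice = blockIdx.x;               // one block per feature-map slice

    int base = slice * FMSIZE * FMSIZE + (ty * 2) * FMSIZE + (tx * 2);
    int m = inNeu[base];
    m = max(m, inNeu[base + 1]);
    m = max(m, inNeu[base + FMSIZE]);
    m = max(m, inNeu[base + FMSIZE + 1]);
    outNeu[slice * (FMSIZE / 2) * (FMSIZE / 2) + ty * (FMSIZE / 2) + tx] = m;
}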
878a912900570a4c74f0d924766e933fea4ff01e.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime_api.h> #include <helper_cuda.h> __global__ void d_malloc_3d_gpu_kernel1(double *** array3D, int nx, int ny, int nz) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nz) { array3D[i] = (double **) array3D + nz + i * ny; //printf("k1: %i | %i\n", i, i* ny); } } __global__ void d_malloc_3d_gpu_kernel2(double *** array3D, int nx, int ny, int nz) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < nz && j < ny) { array3D[i][j] = (double *) array3D + nz + nz * ny + (i * nx * ny) + (j * nx); //printf("k2: %i %i | %i\n", i, j, (i * nx * ny) + (j * nx)); } } extern "C" { double *** d_malloc_3d_gpu(int nx, int ny, int nz) { if (nx <= 0 || ny <= 0 || nz <= 0) return NULL; double ***array3D; checkCudaErrors( hipMalloc((void**)&array3D, nz * sizeof(double **) + nz * ny * sizeof(double *) + nz * ny * nx * sizeof(double)) ); if (array3D == NULL) { return NULL; } dim3 block(16, 16); dim3 grid((nz + 15) / 16, (ny + 15) /16); hipLaunchKernelGGL(( d_malloc_3d_gpu_kernel1), dim3(grid.x), dim3(block.x), 0, 0, array3D, nx, ny, nz); hipLaunchKernelGGL(( d_malloc_3d_gpu_kernel2), dim3(grid), dim3(block), 0, 0, array3D, nx, ny, nz); checkCudaErrors( hipDeviceSynchronize() ); return array3D; } void free_gpu(double ***array3D) { checkCudaErrors( hipFree(array3D) ); } }
878a912900570a4c74f0d924766e933fea4ff01e.cu
#include <cuda_runtime_api.h> #include <helper_cuda.h> __global__ void d_malloc_3d_gpu_kernel1(double *** array3D, int nx, int ny, int nz) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nz) { array3D[i] = (double **) array3D + nz + i * ny; //printf("k1: %i | %i\n", i, i* ny); } } __global__ void d_malloc_3d_gpu_kernel2(double *** array3D, int nx, int ny, int nz) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < nz && j < ny) { array3D[i][j] = (double *) array3D + nz + nz * ny + (i * nx * ny) + (j * nx); //printf("k2: %i %i | %i\n", i, j, (i * nx * ny) + (j * nx)); } } extern "C" { double *** d_malloc_3d_gpu(int nx, int ny, int nz) { if (nx <= 0 || ny <= 0 || nz <= 0) return NULL; double ***array3D; checkCudaErrors( cudaMalloc((void**)&array3D, nz * sizeof(double **) + nz * ny * sizeof(double *) + nz * ny * nx * sizeof(double)) ); if (array3D == NULL) { return NULL; } dim3 block(16, 16); dim3 grid((nz + 15) / 16, (ny + 15) /16); d_malloc_3d_gpu_kernel1<<<grid.x, block.x>>>(array3D, nx, ny, nz); d_malloc_3d_gpu_kernel2<<<grid, block>>>(array3D, nx, ny, nz); checkCudaErrors( cudaDeviceSynchronize() ); return array3D; } void free_gpu(double ***array3D) { checkCudaErrors( cudaFree(array3D) ); } }
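A short usage sketch for the allocator above: the triple pointer lives entirely in device memory, so it can be handed straight to a kernel that indexes it as array3D[k][j][i]. The consuming kernel and the wrapper function name below are hypothetical.

// Hedged usage sketch for d_malloc_3d_gpu/free_gpu; the consuming kernel is hypothetical.
extern "C" double*** d_malloc_3d_gpu(int nx, int ny, int nz);
extern "C" void free_gpu(double*** array3D);

void run3dExample(int nx, int ny, int nz)
{
    double*** u = d_malloc_3d_gpu(nx, ny, nz);
    if (u == NULL) return;
    // some_stencil_kernel<<<grid, block>>>(u, nx, ny, nz);   // hypothetical consumer, indexes u[k][j][i]
    free_gpu(u);
}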
1f295d2ed726d5397078b24b4400befc0d317339.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void RMSPropUpdate(int N, Dtype *g, Dtype *h, Dtype rms_decay, Dtype delta, Dtype local_rate) { CUDA_KERNEL_LOOP(i, N) { float gi = g[i]; float hi = h[i] = rms_decay * h[i] + (1 - rms_decay) * gi * gi; g[i] = local_rate * g[i] / (sqrt(hi) + delta); } } template <typename Dtype> void rmsprop_update_gpu(int N, Dtype *g, Dtype *h, Dtype rms_decay, Dtype delta, Dtype local_rate) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(HIP_KERNEL_NAME(RMSPropUpdate<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, g, h, rms_decay, delta, local_rate); CUDA_POST_KERNEL_CHECK; } template void rmsprop_update_gpu<float>(int, float *, float *, float, float, float); template void rmsprop_update_gpu<double>(int, double *, double *, double, double, double); } // namespace caffe
1f295d2ed726d5397078b24b4400befc0d317339.cu
#include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void RMSPropUpdate(int N, Dtype *g, Dtype *h, Dtype rms_decay, Dtype delta, Dtype local_rate) { CUDA_KERNEL_LOOP(i, N) { float gi = g[i]; float hi = h[i] = rms_decay * h[i] + (1 - rms_decay) * gi * gi; g[i] = local_rate * g[i] / (sqrt(hi) + delta); } } template <typename Dtype> void rmsprop_update_gpu(int N, Dtype *g, Dtype *h, Dtype rms_decay, Dtype delta, Dtype local_rate) { RMSPropUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, g, h, rms_decay, delta, local_rate); CUDA_POST_KERNEL_CHECK; } template void rmsprop_update_gpu<float>(int, float *, float *, float, float, float); template void rmsprop_update_gpu<double>(int, double *, double *, double, double, double); } // namespace caffe
80f445b2e5837fc56cfb5ec61bf9460eb8b9e371.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> struct Target { int2 min; int2 max; __device__ bool contains(int2 pos) const { return pos.x >= min.x && pos.x <= max.x && pos.y >= min.y && pos.y <= max.y; } }; __device__ int simulate(Target target, int2 init_vel) { int2 vel = init_vel; int2 pos = make_int2(0, 0); int max_y = 0; while (pos.y >= target.min.y) { pos.x += vel.x; pos.y += vel.y; vel.x += vel.x != 0 ? (vel.x > 0 ? -1 : 1) : 0; vel.y -= 1; max_y = max(pos.y, max_y); if (target.contains(pos)) return max_y; } return -1; } __global__ void find_max_y(Target target, int *p_max_y) { int x = blockDim.x*blockIdx.x + threadIdx.x; int y = blockDim.y*blockIdx.y + threadIdx.y; int max_y = simulate(target, make_int2(x, y)); atomicMax(p_max_y, max_y); } int main(int argc, char **argv) { Target target; scanf("target area: x=%d..%d, y=%d..%d", &target.min.x, &target.max.x, &target.min.y, &target.max.y); int mx = abs(target.min.x); int my = abs(target.min.y); int *p_max_y; hipMallocManaged(&p_max_y, sizeof(int)); *p_max_y = 0; dim3 block_threads = { 8, 8 }; dim3 num_blocks = { (mx*4+block_threads.x-1)/block_threads.x, (my*4+block_threads.y-1)/block_threads.y }; hipLaunchKernelGGL(( find_max_y), dim3(num_blocks), dim3(block_threads), 0, 0, target, p_max_y); int max_y = 0; hipMemcpy(&max_y, p_max_y, sizeof(int), hipMemcpyDefault); hipFree(p_max_y); printf("%d\n", max_y); return 0; }
80f445b2e5837fc56cfb5ec61bf9460eb8b9e371.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> struct Target { int2 min; int2 max; __device__ bool contains(int2 pos) const { return pos.x >= min.x && pos.x <= max.x && pos.y >= min.y && pos.y <= max.y; } }; __device__ int simulate(Target target, int2 init_vel) { int2 vel = init_vel; int2 pos = make_int2(0, 0); int max_y = 0; while (pos.y >= target.min.y) { pos.x += vel.x; pos.y += vel.y; vel.x += vel.x != 0 ? (vel.x > 0 ? -1 : 1) : 0; vel.y -= 1; max_y = max(pos.y, max_y); if (target.contains(pos)) return max_y; } return -1; } __global__ void find_max_y(Target target, int *p_max_y) { int x = blockDim.x*blockIdx.x + threadIdx.x; int y = blockDim.y*blockIdx.y + threadIdx.y; int max_y = simulate(target, make_int2(x, y)); atomicMax(p_max_y, max_y); } int main(int argc, char **argv) { Target target; scanf("target area: x=%d..%d, y=%d..%d", &target.min.x, &target.max.x, &target.min.y, &target.max.y); int mx = abs(target.min.x); int my = abs(target.min.y); int *p_max_y; cudaMallocManaged(&p_max_y, sizeof(int)); *p_max_y = 0; dim3 block_threads = { 8, 8 }; dim3 num_blocks = { (mx*4+block_threads.x-1)/block_threads.x, (my*4+block_threads.y-1)/block_threads.y }; find_max_y<<<num_blocks, block_threads>>>(target, p_max_y); int max_y = 0; cudaMemcpy(&max_y, p_max_y, sizeof(int), cudaMemcpyDefault); cudaFree(p_max_y); printf("%d\n", max_y); return 0; }
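Because the velocity search space is small, the per-velocity trajectory can also be replayed on the host to spot-check the kernel result; a sketch that mirrors the device simulate() above (the host function name is hypothetical).

// Host-side mirror of the device simulate() above; returns the peak height reached
// if the trajectory hits the target box, otherwise -1.
int simulateHost(int min_x, int max_x, int min_y, int max_y, int vx, int vy)
{
    int x = 0, y = 0, best = 0;
    while (y >= min_y) {
        x += vx;
        y += vy;
        if (vx > 0) --vx; else if (vx < 0) ++vx;   // drag pulls x-velocity toward zero
        --vy;                                      // gravity
        if (y > best) best = y;
        if (x >= min_x && x <= max_x && y >= min_y && y <= max_y) return best;
    }
    return -1;
}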
088b0ec1bfbce8102d5aaaa48325d949cbb9a63c.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "efficient.h" #define blockSize 128 namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // Up-Sweep __global__ void kernUpSweep(int n, int d, int* sweepArray) { int index = blockDim.x * blockIdx.x + threadIdx.x; int power = 1 << (d + 1); int lastPower = 1 << d; int arrayIndex = index * power; if (arrayIndex >= n) return; sweepArray[arrayIndex + power - 1] += sweepArray[arrayIndex + lastPower - 1]; } // Down-Sweep __global__ void kernDownSweep(int n, int d, int* sweepArray) { int index = blockDim.x * blockIdx.x + threadIdx.x; int power = 1 << (d + 1); int lastPower = 1 << d; int arrayIndex = index * power; if (arrayIndex >= n) return; int t = sweepArray[arrayIndex + lastPower - 1]; sweepArray[arrayIndex + lastPower - 1] = sweepArray[arrayIndex + power - 1]; sweepArray[arrayIndex + power - 1] += t; } // Initialize Extra Memory __global__ void kernResetIntBuffer(int N, int* intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } // Non-zero Entry Scan __global__ void kernNonZeroScan(int n, int roundN, int* zeroArray, int* iArray) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= roundN) return; else if (index >= n) { zeroArray[index] = 0; return; } if (iArray[index] == 0) zeroArray[index] = 0; else zeroArray[index] = 1; } __global__ void kernCompact(int n, int* zeroArray, int* idxArray, int* oArray, int* iArray) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) return; if (zeroArray[index] == 1) { oArray[idxArray[index]] = iArray[index]; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { // TODO int scanN = ilog2ceil(n) - 1; int roundCount = pow(2, scanN + 1); int* result; hipMalloc((void**)&result, roundCount * sizeof(int)); dim3 roundInitilize = (roundCount + blockSize - 1) / blockSize; hipLaunchKernelGGL(( kernResetIntBuffer), dim3(roundInitilize), dim3(blockSize), 0, 0, roundCount, result, 0); hipMemcpy(result, idata, n * sizeof(int), hipMemcpyHostToDevice); timer().startGpuTimer(); for (int d = 0; d <= scanN; d++) { dim3 upSweepBlockPerGrid = (roundCount / powf(2, d + 1) + blockSize - 1) / blockSize; hipLaunchKernelGGL(( kernUpSweep), dim3(upSweepBlockPerGrid), dim3(blockSize), 0, 0, roundCount, d, result); } hipMemset(result + roundCount - 1, 0, sizeof(int)); for (int d = scanN; d >= 0; d--) { dim3 upSweepBlockPerGrid = (roundCount / powf(2, d + 1) + blockSize - 1) / blockSize; hipLaunchKernelGGL(( kernDownSweep), dim3(upSweepBlockPerGrid), dim3(blockSize), 0, 0, roundCount, d, result); } timer().endGpuTimer(); hipMemcpy(odata, result, n * sizeof(int), hipMemcpyDeviceToHost); hipFree(result); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. 
*/ int compact(int n, int *odata, const int *idata) { timer().startGpuTimer(); // TODO int scanN = ilog2ceil(n) - 1; int roundCount = pow(2, scanN + 1); int* roundZeroArray; hipMalloc((void**)&roundZeroArray, roundCount * sizeof(int)); int* cudaIData; hipMalloc((void**)&cudaIData, n * sizeof(int)); hipMemcpy(cudaIData, idata, n * sizeof(int), hipMemcpyHostToDevice); dim3 zeroScanBlockPerGrid = (roundCount + blockSize - 1) / blockSize; hipLaunchKernelGGL(( StreamCompaction::Common::kernMapToBoolean), dim3(zeroScanBlockPerGrid), dim3(blockSize), 0, 0, n, roundCount, roundZeroArray, cudaIData); int* roundIdxArray; hipMalloc((void**)&roundIdxArray, roundCount * sizeof(int)); hipMemcpy(roundIdxArray, roundZeroArray, roundCount * sizeof(int), hipMemcpyDeviceToDevice); for (int d = 0; d <= scanN; d++) { dim3 upSweepBlockPerGrid = (roundCount / powf(2, d + 1) + blockSize - 1) / blockSize; kernUpSweep << <upSweepBlockPerGrid, blockSize >> > (roundCount, d, roundIdxArray); } int* a = new int[1](); a[0] = 0; hipMemcpy(roundIdxArray + roundCount - 1, a, sizeof(int), hipMemcpyHostToDevice); delete[]a; for (int d = scanN; d >= 0; d--) { dim3 upSweepBlockPerGrid = (roundCount / powf(2, d + 1) + blockSize - 1) / blockSize; kernDownSweep << <upSweepBlockPerGrid, blockSize >> > (roundCount, d, roundIdxArray); } int compactCountTemp = -1; hipMemcpy(&compactCountTemp, roundIdxArray + roundCount - 1, sizeof(int), hipMemcpyDeviceToHost); int* result; hipMalloc((void**)&result, n * sizeof(int)); dim3 compactBlockPerGrid = (n + blockSize - 1) / blockSize; hipLaunchKernelGGL(( StreamCompaction::Common::kernScatter), dim3(compactBlockPerGrid), dim3(blockSize), 0, 0, n, result, cudaIData, roundZeroArray, roundIdxArray); hipMemcpy(odata, result, n * sizeof(int), hipMemcpyDeviceToHost); timer().endGpuTimer(); hipFree(roundZeroArray); hipFree(cudaIData); hipFree(roundIdxArray); hipFree(result); return compactCountTemp; } } namespace Radix { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // Scan each bit __global__ void kernScanBit(int n, int* iArray, int* bitArray, int* eArray, int bit, int* bitExist) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) return; if ((iArray[index] >> bit) & 1) { if (bitExist[0] == 0) bitExist[0] = 1; bitArray[index] = 1; eArray[index] = 0; } else { bitArray[index] = 0; eArray[index] = 1; } } __global__ void kernComputeTArray(int n, int totalFalses, int* tArray, int* fArray) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) return; tArray[index] = index - fArray[index] + totalFalses; } __global__ void kernComputeIndex(int n, int* dArray, int* tArray, int* fArray, int* bArray) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) return; if (bArray[index] == 1) dArray[index] = tArray[index]; else dArray[index] = fArray[index]; } __global__ void kernReorganizeArray(int n, int* dArray, int* iArray, int* oArray) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) return; oArray[dArray[index]] = iArray[index]; } void radixSort(int n, int* odata, const int* idata) { int scanN = ilog2ceil(n) - 1; int roundCount = pow(2, scanN + 1); int* dev_iArray; int* dev_bArray; int* dev_eArray; int* dev_fArray; int* dev_tArray; int* dev_dArray; int* dev_oArray; hipMalloc((void**)&dev_iArray, n * sizeof(int)); hipMemcpy(dev_iArray, idata, n * sizeof(int), hipMemcpyHostToDevice); hipMalloc((void**)&dev_bArray, n * sizeof(int)); hipMalloc((void**)&dev_eArray, n * 
sizeof(int)); hipMalloc((void**)&dev_fArray, roundCount * sizeof(int)); hipMalloc((void**)&dev_tArray, n * sizeof(int)); hipMalloc((void**)&dev_dArray, n * sizeof(int)); hipMalloc((void**)&dev_oArray, n * sizeof(int)); int* isBitExist; hipMalloc((void**)&isBitExist, sizeof(int)); int curBit = 0; dim3 bitCheckBlocksPerGrid = (n + blockSize - 1) / blockSize; dim3 scanBlocksPerGrid = (roundCount + blockSize - 1) / blockSize; while (true) { // Get bit condition for each entry hipMemset(isBitExist, 0, sizeof(int)); hipLaunchKernelGGL(( kernScanBit), dim3(bitCheckBlocksPerGrid), dim3(blockSize), 0, 0, n, dev_iArray, dev_bArray, dev_eArray, curBit, isBitExist); int h_isBitExist = 0; hipMemcpy(&h_isBitExist, isBitExist, sizeof(int), hipMemcpyDeviceToHost); if (h_isBitExist == 0) break; // Scan the e array dim3 roundInitilize = (roundCount + blockSize - 1) / blockSize; hipLaunchKernelGGL(( StreamCompaction::Efficient::kernResetIntBuffer), dim3(scanBlocksPerGrid), dim3(blockSize), 0, 0, roundCount, dev_fArray, 0); hipMemcpy(dev_fArray, dev_eArray, n * sizeof(int), hipMemcpyDeviceToDevice); int eArrayFinal = 0; hipMemcpy(&eArrayFinal, dev_fArray + roundCount - 1, sizeof(int), hipMemcpyDeviceToHost); for (int d = 0; d <= scanN; d++) { dim3 upSweepBlockPerGrid = (roundCount / powf(2, d + 1) + blockSize - 1) / blockSize; hipLaunchKernelGGL(( StreamCompaction::Efficient::kernUpSweep), dim3(upSweepBlockPerGrid), dim3(blockSize), 0, 0, roundCount, d, dev_fArray); } hipMemset(dev_fArray + roundCount - 1, 0, sizeof(int)); for (int d = scanN; d >= 0; d--) { dim3 upSweepBlockPerGrid = (roundCount / powf(2, d + 1) + blockSize - 1) / blockSize; hipLaunchKernelGGL(( StreamCompaction::Efficient::kernDownSweep), dim3(upSweepBlockPerGrid), dim3(blockSize), 0, 0, roundCount, d, dev_fArray); } int fArrayFinal = 0; hipMemcpy(&fArrayFinal, dev_fArray + roundCount - 1, sizeof(int), hipMemcpyDeviceToHost); int totalFalses = eArrayFinal + fArrayFinal; hipLaunchKernelGGL(( kernComputeTArray), dim3(bitCheckBlocksPerGrid), dim3(blockSize), 0, 0, n, totalFalses, dev_tArray, dev_fArray); hipLaunchKernelGGL(( kernComputeIndex), dim3(bitCheckBlocksPerGrid), dim3(blockSize), 0, 0, n, dev_dArray, dev_tArray, dev_fArray, dev_bArray); hipLaunchKernelGGL(( kernReorganizeArray), dim3(bitCheckBlocksPerGrid), dim3(blockSize), 0, 0, n, dev_dArray, dev_iArray, dev_oArray); hipMemcpy(dev_iArray, dev_oArray, n * sizeof(int), hipMemcpyDeviceToDevice); curBit++; } hipMemcpy(odata, dev_iArray, n * sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_iArray); hipFree(dev_eArray); hipFree(dev_bArray); hipFree(dev_fArray); hipFree(dev_tArray); hipFree(dev_dArray); hipFree(dev_oArray); } } }
088b0ec1bfbce8102d5aaaa48325d949cbb9a63c.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "efficient.h" #define blockSize 128 namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // Up-Sweep __global__ void kernUpSweep(int n, int d, int* sweepArray) { int index = blockDim.x * blockIdx.x + threadIdx.x; int power = 1 << (d + 1); int lastPower = 1 << d; int arrayIndex = index * power; if (arrayIndex >= n) return; sweepArray[arrayIndex + power - 1] += sweepArray[arrayIndex + lastPower - 1]; } // Down-Sweep __global__ void kernDownSweep(int n, int d, int* sweepArray) { int index = blockDim.x * blockIdx.x + threadIdx.x; int power = 1 << (d + 1); int lastPower = 1 << d; int arrayIndex = index * power; if (arrayIndex >= n) return; int t = sweepArray[arrayIndex + lastPower - 1]; sweepArray[arrayIndex + lastPower - 1] = sweepArray[arrayIndex + power - 1]; sweepArray[arrayIndex + power - 1] += t; } // Initialize Extra Memory __global__ void kernResetIntBuffer(int N, int* intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } // Non-zero Entry Scan __global__ void kernNonZeroScan(int n, int roundN, int* zeroArray, int* iArray) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= roundN) return; else if (index >= n) { zeroArray[index] = 0; return; } if (iArray[index] == 0) zeroArray[index] = 0; else zeroArray[index] = 1; } __global__ void kernCompact(int n, int* zeroArray, int* idxArray, int* oArray, int* iArray) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) return; if (zeroArray[index] == 1) { oArray[idxArray[index]] = iArray[index]; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { // TODO int scanN = ilog2ceil(n) - 1; int roundCount = pow(2, scanN + 1); int* result; cudaMalloc((void**)&result, roundCount * sizeof(int)); dim3 roundInitilize = (roundCount + blockSize - 1) / blockSize; kernResetIntBuffer<<<roundInitilize, blockSize>>>(roundCount, result, 0); cudaMemcpy(result, idata, n * sizeof(int), cudaMemcpyHostToDevice); timer().startGpuTimer(); for (int d = 0; d <= scanN; d++) { dim3 upSweepBlockPerGrid = (roundCount / powf(2, d + 1) + blockSize - 1) / blockSize; kernUpSweep<<<upSweepBlockPerGrid, blockSize>>>(roundCount, d, result); } cudaMemset(result + roundCount - 1, 0, sizeof(int)); for (int d = scanN; d >= 0; d--) { dim3 upSweepBlockPerGrid = (roundCount / powf(2, d + 1) + blockSize - 1) / blockSize; kernDownSweep<<<upSweepBlockPerGrid, blockSize>>>(roundCount, d, result); } timer().endGpuTimer(); cudaMemcpy(odata, result, n * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(result); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. 
*/ int compact(int n, int *odata, const int *idata) { timer().startGpuTimer(); // TODO int scanN = ilog2ceil(n) - 1; int roundCount = pow(2, scanN + 1); int* roundZeroArray; cudaMalloc((void**)&roundZeroArray, roundCount * sizeof(int)); int* cudaIData; cudaMalloc((void**)&cudaIData, n * sizeof(int)); cudaMemcpy(cudaIData, idata, n * sizeof(int), cudaMemcpyHostToDevice); dim3 zeroScanBlockPerGrid = (roundCount + blockSize - 1) / blockSize; StreamCompaction::Common::kernMapToBoolean<<<zeroScanBlockPerGrid, blockSize>>>(n, roundCount, roundZeroArray, cudaIData); int* roundIdxArray; cudaMalloc((void**)&roundIdxArray, roundCount * sizeof(int)); cudaMemcpy(roundIdxArray, roundZeroArray, roundCount * sizeof(int), cudaMemcpyDeviceToDevice); for (int d = 0; d <= scanN; d++) { dim3 upSweepBlockPerGrid = (roundCount / powf(2, d + 1) + blockSize - 1) / blockSize; kernUpSweep << <upSweepBlockPerGrid, blockSize >> > (roundCount, d, roundIdxArray); } int* a = new int[1](); a[0] = 0; cudaMemcpy(roundIdxArray + roundCount - 1, a, sizeof(int), cudaMemcpyHostToDevice); delete[]a; for (int d = scanN; d >= 0; d--) { dim3 upSweepBlockPerGrid = (roundCount / powf(2, d + 1) + blockSize - 1) / blockSize; kernDownSweep << <upSweepBlockPerGrid, blockSize >> > (roundCount, d, roundIdxArray); } int compactCountTemp = -1; cudaMemcpy(&compactCountTemp, roundIdxArray + roundCount - 1, sizeof(int), cudaMemcpyDeviceToHost); int* result; cudaMalloc((void**)&result, n * sizeof(int)); dim3 compactBlockPerGrid = (n + blockSize - 1) / blockSize; StreamCompaction::Common::kernScatter<<<compactBlockPerGrid, blockSize>>>(n, result, cudaIData, roundZeroArray, roundIdxArray); cudaMemcpy(odata, result, n * sizeof(int), cudaMemcpyDeviceToHost); timer().endGpuTimer(); cudaFree(roundZeroArray); cudaFree(cudaIData); cudaFree(roundIdxArray); cudaFree(result); return compactCountTemp; } } namespace Radix { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // Scan each bit __global__ void kernScanBit(int n, int* iArray, int* bitArray, int* eArray, int bit, int* bitExist) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) return; if ((iArray[index] >> bit) & 1) { if (bitExist[0] == 0) bitExist[0] = 1; bitArray[index] = 1; eArray[index] = 0; } else { bitArray[index] = 0; eArray[index] = 1; } } __global__ void kernComputeTArray(int n, int totalFalses, int* tArray, int* fArray) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) return; tArray[index] = index - fArray[index] + totalFalses; } __global__ void kernComputeIndex(int n, int* dArray, int* tArray, int* fArray, int* bArray) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) return; if (bArray[index] == 1) dArray[index] = tArray[index]; else dArray[index] = fArray[index]; } __global__ void kernReorganizeArray(int n, int* dArray, int* iArray, int* oArray) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) return; oArray[dArray[index]] = iArray[index]; } void radixSort(int n, int* odata, const int* idata) { int scanN = ilog2ceil(n) - 1; int roundCount = pow(2, scanN + 1); int* dev_iArray; int* dev_bArray; int* dev_eArray; int* dev_fArray; int* dev_tArray; int* dev_dArray; int* dev_oArray; cudaMalloc((void**)&dev_iArray, n * sizeof(int)); cudaMemcpy(dev_iArray, idata, n * sizeof(int), cudaMemcpyHostToDevice); cudaMalloc((void**)&dev_bArray, n * sizeof(int)); cudaMalloc((void**)&dev_eArray, n * sizeof(int)); cudaMalloc((void**)&dev_fArray, 
roundCount * sizeof(int)); cudaMalloc((void**)&dev_tArray, n * sizeof(int)); cudaMalloc((void**)&dev_dArray, n * sizeof(int)); cudaMalloc((void**)&dev_oArray, n * sizeof(int)); int* isBitExist; cudaMalloc((void**)&isBitExist, sizeof(int)); int curBit = 0; dim3 bitCheckBlocksPerGrid = (n + blockSize - 1) / blockSize; dim3 scanBlocksPerGrid = (roundCount + blockSize - 1) / blockSize; while (true) { // Get bit condition for each entry cudaMemset(isBitExist, 0, sizeof(int)); kernScanBit<<<bitCheckBlocksPerGrid, blockSize>>>(n, dev_iArray, dev_bArray, dev_eArray, curBit, isBitExist); int h_isBitExist = 0; cudaMemcpy(&h_isBitExist, isBitExist, sizeof(int), cudaMemcpyDeviceToHost); if (h_isBitExist == 0) break; // Scan the e array dim3 roundInitilize = (roundCount + blockSize - 1) / blockSize; StreamCompaction::Efficient::kernResetIntBuffer<<<scanBlocksPerGrid, blockSize>>>(roundCount, dev_fArray, 0); cudaMemcpy(dev_fArray, dev_eArray, n * sizeof(int), cudaMemcpyDeviceToDevice); int eArrayFinal = 0; cudaMemcpy(&eArrayFinal, dev_fArray + roundCount - 1, sizeof(int), cudaMemcpyDeviceToHost); for (int d = 0; d <= scanN; d++) { dim3 upSweepBlockPerGrid = (roundCount / powf(2, d + 1) + blockSize - 1) / blockSize; StreamCompaction::Efficient::kernUpSweep<<<upSweepBlockPerGrid, blockSize>>>(roundCount, d, dev_fArray); } cudaMemset(dev_fArray + roundCount - 1, 0, sizeof(int)); for (int d = scanN; d >= 0; d--) { dim3 upSweepBlockPerGrid = (roundCount / powf(2, d + 1) + blockSize - 1) / blockSize; StreamCompaction::Efficient::kernDownSweep<<<upSweepBlockPerGrid, blockSize>>>(roundCount, d, dev_fArray); } int fArrayFinal = 0; cudaMemcpy(&fArrayFinal, dev_fArray + roundCount - 1, sizeof(int), cudaMemcpyDeviceToHost); int totalFalses = eArrayFinal + fArrayFinal; kernComputeTArray<<<bitCheckBlocksPerGrid, blockSize>>>(n, totalFalses, dev_tArray, dev_fArray); kernComputeIndex<<<bitCheckBlocksPerGrid, blockSize>>>(n, dev_dArray, dev_tArray, dev_fArray, dev_bArray); kernReorganizeArray<<<bitCheckBlocksPerGrid, blockSize>>>(n, dev_dArray, dev_iArray, dev_oArray); cudaMemcpy(dev_iArray, dev_oArray, n * sizeof(int), cudaMemcpyDeviceToDevice); curBit++; } cudaMemcpy(odata, dev_iArray, n * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_iArray); cudaFree(dev_eArray); cudaFree(dev_bArray); cudaFree(dev_fArray); cudaFree(dev_tArray); cudaFree(dev_dArray); cudaFree(dev_oArray); } } }
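A minimal host-side driver for the work-efficient scan and compaction entry points implemented above; this sketch is not part of the original file pair and assumes the project's efficient.h header (declaring StreamCompaction::Efficient::scan and compact as used here) plus the surrounding build setup. The expected values in the comments refer only to this illustrative 8-element input.

#include <cstdio>
#include <vector>
#include "efficient.h"

int main() {
    const int n = 8;
    std::vector<int> in = { 3, 1, 7, 0, 4, 1, 6, 3 };
    std::vector<int> out(n, 0);

    // Exclusive prefix sum of in; expected output: 0 3 4 11 11 15 16 22
    StreamCompaction::Efficient::scan(n, out.data(), in.data());
    for (int v : out) printf("%d ", v);
    printf("\n");

    // Compaction drops the zero entries and returns how many survive (7 here)
    int kept = StreamCompaction::Efficient::compact(n, out.data(), in.data());
    printf("kept %d of %d elements\n", kept, n);
    return 0;
}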
13e9e49de5091a685fa055d9b9cc4894c9faa0c4.hip
// !!! This is a file automatically generated by hipify!!! #ifdef _WIN32 #include "Windows.h" #endif #include <iostream> #include <fstream> #include <cstdio> // Cuda #include <hip/hip_runtime.h> #include "Common.h" #include "Network.h" Network::Network(const std::vector<std::unique_ptr<LayerDefinition>> &layers) : _nImages(0), _imgDim(0), _iBytes(0), _testRight(0), _isPredict(false) { for (auto& l : layers) _layers.push_back(l.get()); } Network::~Network() { } void Network::train(Data *data, const int &epoch, const double &learningRate) { // Set up the network setNetwork(data); // Size of a single image _imgDim = data->getImgDimension(); for (int j = 0; j < epoch; j++) { // Index that selects the right image to feed into the network _imgIndex = 0; for (int i = 0; i < _nImages; i++) { //std::cout << i << " of " << _nImages << "\r"; forwardPropagation(); backPropagation(i, learningRate); // Increment the index _imgIndex += _imgDim; } } // Free the training data from the device CHECK(hipFree(cudaData)); CHECK(hipFree(cudaLabels)); } void Network::predict(Data *data) { _isPredict = true; // Read the data from the test set data->readTestData(); // Load the data into Cuda cudaDataLoad(data); // Get the array containing the labels const uint8_t *labels = data->getLabels(); // Set the size of the predictions array _predictions.reserve(_nImages); // Index that selects the right image to feed into the network _imgIndex = 0; // Process every image for (int i = 0; i < _nImages; i++) { forwardPropagation(); predictLabel(i, labels[i]); // Increment the index _imgIndex += _imgDim; } // Print the results obtained during the test phase printNetworkError(data->getLabelSize()); } void Network::cudaDataLoad(Data *data) { // Set the Cuda pointers to Null cudaData = NULL; cudaLabels = NULL; double *cudaDataHost = NULL; double *cudaLabelsHost = NULL; // Number of available examples _nImages = data->getLabelSize(); const int dBytes = data->getDataSize() * sizeof(double); const int lBytes = _nImages * sizeof(uint8_t); // Allocate the pinned memory CHECK(hipHostMalloc((void**)&cudaDataHost, dBytes)); CHECK(hipHostMalloc((void**)&cudaLabelsHost, lBytes)); // Copy the data memcpy(cudaDataHost, data->getData(), dBytes); memcpy(cudaLabelsHost, data->getLabels(), lBytes); // Allocate the matrices on the GPU CHECK(hipMalloc((void**)&cudaData, dBytes)); CHECK(hipMalloc((void**)&cudaLabels, lBytes)); // Transfer the data CHECK(hipMemcpy(cudaData, cudaDataHost, dBytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(cudaLabels, cudaLabelsHost, lBytes, hipMemcpyHostToDevice)); // Free the labels on the CPU (training phase only) if (!_isPredict) data->clearLabels(); // Free the images on the CPU data->clearData(); // Free the pinned memory CHECK(hipHostFree(cudaDataHost)); CHECK(hipHostFree(cudaLabelsHost)); } void Network::cudaInitStruct(Data *data) { _layers.front()->defineCuda(data->getImgWidth(), data->getImgHeight(), data->getImgDepth()); for (auto it = _layers.begin() + 1; it != _layers.end(); ++it) { auto pv = std::prev(it, 1); auto prevWidth = (*pv)->getWidth(); auto prevHeight = (*pv)->getHeight(); auto prevDepth = (*pv)->getDepth(); (*it)->defineCuda(prevWidth, prevHeight, prevDepth); } } void Network::forwardPropagation(void) { _layers.front()->forward_propagation(cudaData + _imgIndex); for (auto it = _layers.begin() + 1; it != _layers.end(); ++it) { auto pv = std::prev(it, 1); auto *outputPointer = (*pv)->getCudaOutputPointer(); (*it)->forward_propagation(outputPointer); } } void
Network::backPropagation(const int &target, const double &learningRate) { // Case with a single layer if (_layers.size() == 1) { _layers.back()->back_propagation_output(cudaData + _imgIndex, cudaLabels, target, learningRate); return; } // Case with more than one layer (step back by 2 layers) auto prevOutput = (*std::prev(_layers.end(), 2))->getCudaOutputPointer(); _layers.back()->back_propagation_output(prevOutput, cudaLabels, target, learningRate); // Back propagation on the intermediate layers for (auto it = _layers.rbegin() + 1; it != _layers.rend() - 1; ++it) { auto pv = std::next(it, 1); auto fw = std::prev(it, 1); auto prevOutput = (*pv)->getCudaOutputPointer(); auto prevError = (*fw)->getCudaPrevErrorPointer(); (*it)->back_propagation(prevOutput, prevError, learningRate, true); } // Back propagation on the first layer (only the input that precedes it) auto fw = std::next(_layers.begin(), 1); auto prevError = (*fw)->getCudaPrevErrorPointer(); _layers.front()->back_propagation(cudaData + _imgIndex, prevError, learningRate, false); } void Network::printWeightsOnFile(const std::string &filename, Data *data) { std::ofstream ofs(filename.c_str(), std::ios::out | std::ios::binary); if (!ofs.is_open()) { std::cerr << "Error opening the weights file!!" << std::endl; cudaClearAll(data); exit(1); } for (auto l : _layers) { auto weights = l->getWeights(); auto weightSize = l->getWeightCount(); auto bias = l->getBias(); auto biaSize = l->getNodeCount(); auto prev = weightSize / biaSize; ofs << "Layer" << std::endl << std::endl; ofs << "Weights Matrix" << std::endl << std::endl; printOnFile(weights, prev, ofs); ofs << std::endl << std::endl; ofs << "Bias Matrix" << std::endl << std::endl; printOnFile(bias, biaSize, ofs); ofs << std::endl << std::endl << std::endl; } ofs.close(); } inline void Network::setNetwork(Data *data) { // Read the data from the training set data->readTrainData(); // Load the data into Cuda cudaDataLoad(data); // Initialize the network structures cudaInitStruct(data); } inline void Network::predictLabel(const int &index, const uint8_t &label) { // Compute the prediction at the output layer int prediction = _layers.back()->getPredictionIndex(); #ifdef DEBUG std::cout << "\n\nPrediction: " << prediction << std::endl; std::cout << "Label: " << unsigned(label) << std::endl << std::endl << std::endl; #endif // Store the prediction in the array _predictions[index] = prediction; // Check whether the prediction is correct if (prediction == label) _testRight++; } inline void Network::printNetworkError(const int &nImages) { // Compute the accuracy double accuracy = (static_cast<double>(_testRight) / nImages) * 100; // Print the number of errors made std::cout << "Correctly classified images: " << _testRight << std::endl; std::cout << "Incorrectly classified images: " << nImages - _testRight << std::endl; std::cout << "Network accuracy: " << accuracy << std::endl; } void Network::cudaClearAll(Data *data) { // Clear the vector containing the labels data->clearLabels(); // Free the test data from the device CHECK(hipFree(cudaData)); CHECK(hipFree(cudaLabels)); // Free the cuda memory associated with each layer for (auto l : _layers) l->deleteCuda(); // Free the device memory CHECK(hipDeviceReset()); } void Network::printW() { for (auto l : _layers) l->printW(); }
13e9e49de5091a685fa055d9b9cc4894c9faa0c4.cu
#ifdef _WIN32 #include "Windows.h" #endif #include <iostream> #include <fstream> #include <cstdio> // Cuda #include <cuda_runtime.h> #include "Common.h" #include "Network.h" Network::Network(const std::vector<std::unique_ptr<LayerDefinition>> &layers) : _nImages(0), _imgDim(0), _iBytes(0), _testRight(0), _isPredict(false) { for (auto& l : layers) _layers.push_back(l.get()); } Network::~Network() { } void Network::train(Data *data, const int &epoch, const double &learningRate) { // Set up the network setNetwork(data); // Size of a single image _imgDim = data->getImgDimension(); for (int j = 0; j < epoch; j++) { // Index that selects the right image to feed into the network _imgIndex = 0; for (int i = 0; i < _nImages; i++) { //std::cout << i << " of " << _nImages << "\r"; forwardPropagation(); backPropagation(i, learningRate); // Increment the index _imgIndex += _imgDim; } } // Free the training data from the device CHECK(cudaFree(cudaData)); CHECK(cudaFree(cudaLabels)); } void Network::predict(Data *data) { _isPredict = true; // Read the data from the test set data->readTestData(); // Load the data into Cuda cudaDataLoad(data); // Get the array containing the labels const uint8_t *labels = data->getLabels(); // Set the size of the predictions array _predictions.reserve(_nImages); // Index that selects the right image to feed into the network _imgIndex = 0; // Process every image for (int i = 0; i < _nImages; i++) { forwardPropagation(); predictLabel(i, labels[i]); // Increment the index _imgIndex += _imgDim; } // Print the results obtained during the test phase printNetworkError(data->getLabelSize()); } void Network::cudaDataLoad(Data *data) { // Set the Cuda pointers to Null cudaData = NULL; cudaLabels = NULL; double *cudaDataHost = NULL; double *cudaLabelsHost = NULL; // Number of available examples _nImages = data->getLabelSize(); const int dBytes = data->getDataSize() * sizeof(double); const int lBytes = _nImages * sizeof(uint8_t); // Allocate the pinned memory CHECK(cudaMallocHost((void**)&cudaDataHost, dBytes)); CHECK(cudaMallocHost((void**)&cudaLabelsHost, lBytes)); // Copy the data memcpy(cudaDataHost, data->getData(), dBytes); memcpy(cudaLabelsHost, data->getLabels(), lBytes); // Allocate the matrices on the GPU CHECK(cudaMalloc((void**)&cudaData, dBytes)); CHECK(cudaMalloc((void**)&cudaLabels, lBytes)); // Transfer the data CHECK(cudaMemcpy(cudaData, cudaDataHost, dBytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(cudaLabels, cudaLabelsHost, lBytes, cudaMemcpyHostToDevice)); // Free the labels on the CPU (training phase only) if (!_isPredict) data->clearLabels(); // Free the images on the CPU data->clearData(); // Free the pinned memory CHECK(cudaFreeHost(cudaDataHost)); CHECK(cudaFreeHost(cudaLabelsHost)); } void Network::cudaInitStruct(Data *data) { _layers.front()->defineCuda(data->getImgWidth(), data->getImgHeight(), data->getImgDepth()); for (auto it = _layers.begin() + 1; it != _layers.end(); ++it) { auto pv = std::prev(it, 1); auto prevWidth = (*pv)->getWidth(); auto prevHeight = (*pv)->getHeight(); auto prevDepth = (*pv)->getDepth(); (*it)->defineCuda(prevWidth, prevHeight, prevDepth); } } void Network::forwardPropagation(void) { _layers.front()->forward_propagation(cudaData + _imgIndex); for (auto it = _layers.begin() + 1; it != _layers.end(); ++it) { auto pv = std::prev(it, 1); auto *outputPointer = (*pv)->getCudaOutputPointer(); (*it)->forward_propagation(outputPointer); } } void Network::backPropagation(const int &target, const double
&learningRate) { // Case with a single layer if (_layers.size() == 1) { _layers.back()->back_propagation_output(cudaData + _imgIndex, cudaLabels, target, learningRate); return; } // Case with more than one layer (step back by 2 layers) auto prevOutput = (*std::prev(_layers.end(), 2))->getCudaOutputPointer(); _layers.back()->back_propagation_output(prevOutput, cudaLabels, target, learningRate); // Back propagation on the intermediate layers for (auto it = _layers.rbegin() + 1; it != _layers.rend() - 1; ++it) { auto pv = std::next(it, 1); auto fw = std::prev(it, 1); auto prevOutput = (*pv)->getCudaOutputPointer(); auto prevError = (*fw)->getCudaPrevErrorPointer(); (*it)->back_propagation(prevOutput, prevError, learningRate, true); } // Back propagation on the first layer (only the input that precedes it) auto fw = std::next(_layers.begin(), 1); auto prevError = (*fw)->getCudaPrevErrorPointer(); _layers.front()->back_propagation(cudaData + _imgIndex, prevError, learningRate, false); } void Network::printWeightsOnFile(const std::string &filename, Data *data) { std::ofstream ofs(filename.c_str(), std::ios::out | std::ios::binary); if (!ofs.is_open()) { std::cerr << "Error opening the weights file!!" << std::endl; cudaClearAll(data); exit(1); } for (auto l : _layers) { auto weights = l->getWeights(); auto weightSize = l->getWeightCount(); auto bias = l->getBias(); auto biaSize = l->getNodeCount(); auto prev = weightSize / biaSize; ofs << "Layer" << std::endl << std::endl; ofs << "Weights Matrix" << std::endl << std::endl; printOnFile(weights, prev, ofs); ofs << std::endl << std::endl; ofs << "Bias Matrix" << std::endl << std::endl; printOnFile(bias, biaSize, ofs); ofs << std::endl << std::endl << std::endl; } ofs.close(); } inline void Network::setNetwork(Data *data) { // Read the data from the training set data->readTrainData(); // Load the data into Cuda cudaDataLoad(data); // Initialize the network structures cudaInitStruct(data); } inline void Network::predictLabel(const int &index, const uint8_t &label) { // Compute the prediction at the output layer int prediction = _layers.back()->getPredictionIndex(); #ifdef DEBUG std::cout << "\n\nPrediction: " << prediction << std::endl; std::cout << "Label: " << unsigned(label) << std::endl << std::endl << std::endl; #endif // Store the prediction in the array _predictions[index] = prediction; // Check whether the prediction is correct if (prediction == label) _testRight++; } inline void Network::printNetworkError(const int &nImages) { // Compute the accuracy double accuracy = (static_cast<double>(_testRight) / nImages) * 100; // Print the number of errors made std::cout << "Correctly classified images: " << _testRight << std::endl; std::cout << "Incorrectly classified images: " << nImages - _testRight << std::endl; std::cout << "Network accuracy: " << accuracy << std::endl; } void Network::cudaClearAll(Data *data) { // Clear the vector containing the labels data->clearLabels(); // Free the test data from the device CHECK(cudaFree(cudaData)); CHECK(cudaFree(cudaLabels)); // Free the cuda memory associated with each layer for (auto l : _layers) l->deleteCuda(); // Free the device memory CHECK(cudaDeviceReset()); } void Network::printW() { for (auto l : _layers) l->printW(); }
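A standalone sketch (not project code) of the pinned-memory staging pattern that Network::cudaDataLoad uses above: allocate a page-locked host buffer, copy the host data into it, then issue the host-to-device transfer. Only standard CUDA runtime calls are used, and error checking is omitted for brevity.

#include <cstdio>
#include <cstring>
#include <vector>
#include <cuda_runtime.h>

int main() {
    const int n = 1024;
    std::vector<double> host(n, 1.0);
    const size_t bytes = n * sizeof(double);

    double* pinned = nullptr;   // page-locked staging buffer on the host
    double* device = nullptr;   // destination buffer on the GPU
    cudaMallocHost((void**)&pinned, bytes);
    cudaMalloc((void**)&device, bytes);

    memcpy(pinned, host.data(), bytes);                         // host -> pinned
    cudaMemcpy(device, pinned, bytes, cudaMemcpyHostToDevice);  // pinned -> device

    cudaFreeHost(pinned);  // the staging buffer is no longer needed after the copy
    cudaFree(device);
    printf("transferred %zu bytes through a pinned staging buffer\n", bytes);
    return 0;
}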
332548d9b0685904aaec299aafaac12659177972.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/native/Pool.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <THH/THHNumerics.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } __device__ inline int max(int a, int b) { return a >= b ? a : b; } template <typename scalar_t, typename accscalar_t, bool COUNT_INCLUDE_PAD, bool USE_DIVISOR> __global__ void avg_pool2d_out_cuda_frame(const int nthreads, const scalar_t* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const top_data, const int divisor_override) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); accscalar_t aveval = accscalar_t(0); const scalar_t* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } int divide_factor; if (USE_DIVISOR) { divide_factor = divisor_override; } else { if(COUNT_INCLUDE_PAD) { divide_factor = pool_size; } else { divide_factor = (hend - hstart) * (wend - wstart); } } top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / divide_factor); } } template <typename scalar_t, typename accscalar_t, bool COUNT_INCLUDE_PAD, bool USE_DIVISOR> __global__ void avg_pool2d_backward_out_cuda_frame(const int nthreads, const scalar_t* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const bottom_diff, const int divisor_override) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); accscalar_t gradient = accscalar_t(0); const scalar_t* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); int divide_factor; if (USE_DIVISOR) { divide_factor = divisor_override; } else { if(COUNT_INCLUDE_PAD) { divide_factor = pool_size; } else { divide_factor = (hend - hstart) * (wend - wstart); } } gradient += top_diff_slice[ph * pooled_width + pw] / divide_factor; } } bottom_diff[index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient); } } void avg_pool2d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { TensorArg output_arg{ output, "output", 1 }; TensorArg input_arg{ input_, "input_", 2 }; checkAllSameGPU("avg_pool2d_out_cuda", {output_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints"); const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2, "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints"); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "avg_pool2d: padding must either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0, "divisor must be not zero"); const int64_t nbatch = input_.ndimension() == 4 ? 
input_.size(-4) : 1; const int64_t nInputPlane = input_.size(-3); const int64_t inputHeight = input_.size(-2); const int64_t inputWidth = input_.size(-1); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode); pool2d_shape_check( input_, kH, kW, dH, dW, padH, padW, 1, 1, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); Tensor input = input_.contiguous(); output.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); const int count = safe_downcast<int, int64_t>(output.numel()); const int num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); if (divisor_override.has_value()) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); hipLaunchKernelGGL(( avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, false, true>) , dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, output_data, divisor_override.value()); } ); } else { if (count_include_pad) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); hipLaunchKernelGGL(( avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, true, false>) , dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, output_data, 0); } ); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); hipLaunchKernelGGL(( avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, false, false>) , dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, output_data, 0); } ); } } TORCH_CHECK(hipGetLastError() == hipSuccess, "avg_pool2d_out_cuda_frame failed with error code ", hipGetLastError()); if (input.ndimension() == 3) { output.resize_({nInputPlane, outputHeight, outputWidth}); } } Tensor& avg_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input_, "input_", 3 }; checkAllSameGPU("avg_pool2d_backward_out_cuda", {gradInput_arg, gradOutput_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? 
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints"); const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2, "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints"); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "avg_pool2d: padding must either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0, "divisor must be not zero"); const Tensor input = input_.contiguous(); const Tensor gradOutput = gradOutput_.contiguous(); const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1; const int64_t nInputPlane = input.size(-3); const int64_t inputHeight = input.size(-2); const int64_t inputWidth = input.size(-1); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode); avg_pool2d_backward_shape_check( input_, gradOutput_, nbatch, kH, kW, dH, dW, padH, padW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); gradInput.resize_as_(input); const int count = safe_downcast<int, int64_t>(input.numel()); const int num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); if (divisor_override.has_value()) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_backward_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); hipLaunchKernelGGL(( avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, false, true>) , dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, gradOutput_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, gradInput_data, divisor_override.value()); } ); } else { if (count_include_pad) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_backward_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); hipLaunchKernelGGL(( avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, true, false>) , dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, gradOutput_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, gradInput_data, 0); } ); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_backward_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); 
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); hipLaunchKernelGGL(( avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, false, false>) , dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, gradOutput_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, gradInput_data, 0); } ); } } TORCH_CHECK(hipGetLastError() == hipSuccess, "avg_pool2d_backward_out_cuda failed with error code ", hipGetLastError()); return gradInput; } } // namespace Tensor& avg_pool2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { avg_pool2d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return output; } Tensor avg_pool2d_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { Tensor output = at::empty({0}, input.options()); avg_pool2d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return output; } Tensor& avg_pool2d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { avg_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return gradInput; } Tensor avg_pool2d_backward_cuda( const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { auto gradInput = at::zeros_like(input); avg_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return gradInput; } } // at::native } // at
332548d9b0685904aaec299aafaac12659177972.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/native/Pool.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <THC/THCNumerics.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } __device__ inline int max(int a, int b) { return a >= b ? a : b; } template <typename scalar_t, typename accscalar_t, bool COUNT_INCLUDE_PAD, bool USE_DIVISOR> __global__ void avg_pool2d_out_cuda_frame(const int nthreads, const scalar_t* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const top_data, const int divisor_override) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); accscalar_t aveval = accscalar_t(0); const scalar_t* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } int divide_factor; if (USE_DIVISOR) { divide_factor = divisor_override; } else { if(COUNT_INCLUDE_PAD) { divide_factor = pool_size; } else { divide_factor = (hend - hstart) * (wend - wstart); } } top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / divide_factor); } } template <typename scalar_t, typename accscalar_t, bool COUNT_INCLUDE_PAD, bool USE_DIVISOR> __global__ void avg_pool2d_backward_out_cuda_frame(const int nthreads, const scalar_t* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const bottom_diff, const int divisor_override) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); accscalar_t gradient = accscalar_t(0); const scalar_t* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); int divide_factor; if (USE_DIVISOR) { divide_factor = divisor_override; } else { if(COUNT_INCLUDE_PAD) { divide_factor = pool_size; } else { divide_factor = (hend - hstart) * (wend - wstart); } } gradient += top_diff_slice[ph * pooled_width + pw] / divide_factor; } } bottom_diff[index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient); } } void avg_pool2d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { TensorArg output_arg{ output, "output", 1 }; TensorArg input_arg{ input_, "input_", 2 }; checkAllSameGPU("avg_pool2d_out_cuda", {output_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints"); const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2, "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints"); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "avg_pool2d: padding must either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0, "divisor must be not zero"); const int64_t nbatch = input_.ndimension() == 4 ? 
input_.size(-4) : 1; const int64_t nInputPlane = input_.size(-3); const int64_t inputHeight = input_.size(-2); const int64_t inputWidth = input_.size(-1); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode); pool2d_shape_check( input_, kH, kW, dH, dW, padH, padW, 1, 1, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); Tensor input = input_.contiguous(); output.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); const int count = safe_downcast<int, int64_t>(output.numel()); const int num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); if (divisor_override.has_value()) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, false, true> <<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, output_data, divisor_override.value()); } ); } else { if (count_include_pad) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, true, false> <<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, output_data, 0); } ); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, false, false> <<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, output_data, 0); } ); } } TORCH_CHECK(cudaGetLastError() == cudaSuccess, "avg_pool2d_out_cuda_frame failed with error code ", cudaGetLastError()); if (input.ndimension() == 3) { output.resize_({nInputPlane, outputHeight, outputWidth}); } } Tensor& avg_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input_, "input_", 3 }; checkAllSameGPU("avg_pool2d_backward_out_cuda", {gradInput_arg, gradOutput_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints"); const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? 
kH : safe_downcast<int, int64_t>(kernel_size[1]); TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2, "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints"); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "avg_pool2d: padding must either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0, "divisor must be not zero"); const Tensor input = input_.contiguous(); const Tensor gradOutput = gradOutput_.contiguous(); const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1; const int64_t nInputPlane = input.size(-3); const int64_t inputHeight = input.size(-2); const int64_t inputWidth = input.size(-1); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode); avg_pool2d_backward_shape_check( input_, gradOutput_, nbatch, kH, kW, dH, dW, padH, padW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); gradInput.resize_as_(input); const int count = safe_downcast<int, int64_t>(input.numel()); const int num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); if (divisor_override.has_value()) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_backward_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, false, true> <<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, gradInput_data, divisor_override.value()); } ); } else { if (count_include_pad) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_backward_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, true, false> <<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, gradInput_data, 0); } ); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_backward_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, false, false> <<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, 
padH, padW, gradInput_data, 0); } ); } } TORCH_CHECK(cudaGetLastError() == cudaSuccess, "avg_pool2d_backward_out_cuda failed with error code ", cudaGetLastError()); return gradInput; } } // namespace Tensor& avg_pool2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { avg_pool2d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return output; } Tensor avg_pool2d_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { Tensor output = at::empty({0}, input.options()); avg_pool2d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return output; } Tensor& avg_pool2d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { avg_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return gradInput; } Tensor avg_pool2d_backward_cuda( const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { auto gradInput = at::zeros_like(input); avg_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return gradInput; } } // at::native } // at
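A rough standalone sketch of the output-size arithmetic behind the pooling_output_shape calls above, with dilation fixed to 1; this is not the ATen helper itself, just the familiar floor/ceil formula plus the usual ceil_mode clamp, so corner cases may differ from ATen's exact implementation.

#include <cstdio>
#include <cmath>

// out = floor_or_ceil((in + 2*pad - kernel) / stride) + 1, clamped so that a
// ceil_mode window never starts entirely inside the right/bottom padding.
long pooled_size(long in, long kernel, long pad, long stride, bool ceil_mode) {
    double span = double(in + 2 * pad - kernel) / stride;
    long out = (ceil_mode ? (long)std::ceil(span) : (long)std::floor(span)) + 1;
    if (ceil_mode && (out - 1) * stride >= in + pad) --out;
    return out;
}

int main() {
    // e.g. a 7x7 input, 3x3 kernel, stride 2, no padding -> a 3x3 pooled map
    printf("%ld\n", pooled_size(7, 3, 0, 2, /*ceil_mode=*/false));
    return 0;
}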
Activation.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define _USE_MATH_DEFINES #include <ATen/native/Activation.h> #include <cmath> #include <thrust/tuple.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/core/Array.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/native/hip/Loops.cuh> #include <c10/hip/HIPMathCompat.h> namespace at { namespace native { // ----------------------------------- // prelu forward // ----------------------------------- template <typename scalar_t> void prelu_cuda_kernel_share_weights( const Tensor& input, Tensor& result, const scalar_t* weight_data) { auto iter = TensorIterator::unary_op(result, input); at::native::gpu_kernel(iter, [weight_data] GPU_LAMBDA (scalar_t input_val) { return (input_val > 0) ? input_val : *weight_data * input_val; }); } template <typename scalar_t> __global__ void prelu_cuda_kernel_multi_weights( scalar_t* result_data, const scalar_t* input_data, const scalar_t* weight_data, int64_t input_stride0, int64_t input_stride1, int64_t input_numel) { int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x; if (linearId >= input_numel) return; // multiply values at each channel with weight[channel_index] int64_t channel = (linearId % input_stride0) / input_stride1; scalar_t input_data_val = input_data[linearId]; result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val; } Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) { TORCH_CHECK(self.is_cuda()); TORCH_CHECK(weight_.is_cuda()); auto input = self.contiguous(); auto weight = weight_.contiguous(); TORCH_CHECK(input.is_contiguous()); TORCH_CHECK(weight.is_contiguous()); int64_t weight_num = weight.numel(); Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto strides = input.strides(); // case1: shared weight for all channels if (weight_num == 1) { AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] { prelu_cuda_kernel_share_weights<scalar_t>( input, result, weight.data_ptr<scalar_t>()); }); } else { // case2: multiple weights, one for each channel int64_t input_ndim = input.dim(); TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor."); int64_t channel_size = 1; // channel_size default to 1 int64_t input_stride0 = 1, input_stride1 = 1; if (input_ndim > 1) { channel_size = input.size(1); // channel is the 2nd dim of input input_stride0 = strides[0]; input_stride1 = strides[1]; } TORCH_CHECK(channel_size == weight_num, "Mismatch of parameter numbers and input channel size. 
Found parameter numbers = ", weight_num, " and channel size = ", channel_size, "."); // config to run cuda kernel int64_t input_numel = input.numel(); const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel)); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice); TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions"); AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] { hipLaunchKernelGGL(( prelu_cuda_kernel_multi_weights<scalar_t>) , dim3(grid), dim3(block), 0, stream, result.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), weight.data_ptr<scalar_t>(), input_stride0, input_stride1, input_numel); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } return result; } // ----------------------------------- // prelu backward // ----------------------------------- template <typename scalar_t> void prelu_cuda_backward_kernel_share_weights( const Tensor& input, const Tensor& grad_out, Tensor& input_grad, Tensor& weight_grad_collector, const scalar_t* weight_data) { at::TensorIterator iter = TensorIteratorConfig() .add_output(input_grad) .add_output(weight_grad_collector) .add_input(input) .add_input(grad_out) .build(); // N.B. `std::tuple` does not support `::operator=` on device code. gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t input, scalar_t grad_out) -> thrust::tuple<scalar_t, scalar_t> { scalar_t input_grad = input > 0 ? grad_out : (*weight_data) * grad_out; scalar_t weight_grad_collector = input > 0 ? scalar_t(0) : input * grad_out; return {input_grad, weight_grad_collector}; }); } template <typename scalar_t> __global__ void prelu_cuda_backward_kernel_multi_weights( const scalar_t* input_data, const scalar_t* weight_data, const scalar_t* grad_out_data, scalar_t* input_grad_data, scalar_t* weight_grad_collector, int64_t input_stride0, int64_t input_stride1, int64_t input_numel) { int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x; if (linearId >= input_numel) return; int64_t channel = (linearId % input_stride0) / input_stride1; scalar_t input_data_val = input_data[linearId]; scalar_t grad_out_data_val = grad_out_data[linearId]; input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val; weight_grad_collector[linearId] = (input_data_val > 0) ? 
scalar_t(0) : input_data_val * grad_out_data_val; } std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) { TORCH_CHECK(grad_out_.is_cuda()); TORCH_CHECK(self.is_cuda()); TORCH_CHECK(weight_.is_cuda()); auto input = self.contiguous(); auto grad_out = grad_out_.contiguous(); auto weight = weight_.contiguous(); TORCH_CHECK(input.is_contiguous()); TORCH_CHECK(weight.is_contiguous()); TORCH_CHECK(grad_out.is_contiguous()); int64_t weight_num = weight.numel(); auto strides = input.strides(); auto dims = input.dim(); Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); // case1: shared parameter for all channels if (weight_num == 1) { AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] { prelu_cuda_backward_kernel_share_weights<scalar_t>( input, grad_out, input_grad, weight_grad_collector, weight.data_ptr<scalar_t>()); }); weight_grad.fill_(weight_grad_collector.sum()); } else { // case2: multiple parameters, one for each channel int64_t input_ndim = input.dim(); TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor."); int64_t channel_size = 1; // channel_size default to 1 int64_t input_stride0 = 1, input_stride1 = 1; if (input_ndim > 1) { channel_size = input.size(1); // channel is the 2nd dim of input input_stride0 = strides[0]; input_stride1 = strides[1]; } TORCH_CHECK(channel_size == weight_num, "Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num, " and channel size = ", channel_size, "."); // config to run cuda kernel int64_t input_numel = input.numel(); const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel)); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice); TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions"); AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] { hipLaunchKernelGGL(( prelu_cuda_backward_kernel_multi_weights<scalar_t>) , dim3(grid), dim3(block), 0, stream, input.data_ptr<scalar_t>(), weight.data_ptr<scalar_t>(), grad_out.data_ptr<scalar_t>(), input_grad.data_ptr<scalar_t>(), weight_grad_collector.data_ptr<scalar_t>(), input_stride0, input_stride1, input_numel); C10_HIP_KERNEL_LAUNCH_CHECK(); }); // update weight_grad std::vector<int64_t> reduce_dims; reduce_dims.push_back(0); if (dims > 2) { for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i); } weight_grad = weight_grad_collector.sum(reduce_dims); } return std::tuple<Tensor, Tensor>{input_grad, weight_grad}; } // ----------------------------------- // hardshrink // ----------------------------------- void hardshrink_kernel(TensorIterator& iter, Scalar value) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() { auto lambd = value.to<scalar_t>(); gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t { return (a >= -lambd && a <= lambd) ? 
scalar_t(0) : a; }); }); } void softshrink_kernel(TensorIterator& iter, Scalar value) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() { auto lambd = value.to<scalar_t>(); gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t { return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0)); }); }); } void shrink_backward_kernel(TensorIterator& iter, Scalar value) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() { auto lambd = value.to<scalar_t>(); gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t { return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val; }); }); } void hardtanh_backward_kernel(TensorIterator& iter, Scalar min, Scalar max) { AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() { auto min_val = min.to<scalar_t>(); auto max_val = max.to<scalar_t>(); gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a; }); }); } void softplus_kernel(TensorIterator& iter, Scalar beta_, Scalar threshold_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() { auto beta = beta_.to<scalar_t>(); auto threshold = threshold_.to<scalar_t>(); gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t { return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(::exp(a * beta))) / beta; }); }); } void softplus_backward_kernel(TensorIterator& iter, Scalar beta_, Scalar threshold_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() { auto beta = beta_.to<scalar_t>(); auto threshold = threshold_.to<scalar_t>(); gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { scalar_t z = ::exp(b * beta); return (b * beta) > threshold ? a : a * (z - scalar_t(1.)) / z; }); }); } template <typename scalar_t> void threshold_kernel_impl(TensorIterator& iter, scalar_t threshold, scalar_t value) { gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t { return x <= threshold ? value : other; }); } static void threshold_kernel(TensorIterator& iter, Scalar threshold, Scalar value) { AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] { threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>()); }); } void elu_kernel(TensorIterator& iter, Scalar alpha, Scalar scale, Scalar input_scale) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() { auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>(); auto poscoef = scale.to<scalar_t>(); auto negiptcoef = input_scale.to<scalar_t>(); gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t { return a > scalar_t(0) ? 
a * poscoef : (static_cast<scalar_t>(::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef; }); }); } void elu_backward_kernel(TensorIterator& iter, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() { auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>(); auto poscoef = scale.to<scalar_t>(); auto negiptcoef = input_scale.to<scalar_t>(); gpu_kernel(iter, [negcoef, poscoef, negiptcoef, is_result]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { if (is_result) { return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef; } else { return b <= scalar_t(0) ? a * negiptcoef * negcoef * (static_cast<scalar_t>(::exp(b * negiptcoef))) : a * poscoef; } }); }); } namespace { void GeluCUDAKernelImpl(TensorIterator& it) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() { using T_ACC = acc_type<scalar_t, true>; gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t { return static_cast<T_ACC>(x) * c10::hip::compat::normcdf(static_cast<T_ACC>(x)); }); }); } void GeluBackwardCUDAKernelImpl(TensorIterator& it) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() { using T_ACC = acc_type<scalar_t, true>; gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t { constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5); const T_ACC cdf = c10::hip::compat::normcdf(static_cast<T_ACC>(x)); const T_ACC pdf = c10::hip::compat::exp( T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) * kBeta; return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf); }); }); } void leaky_relu_kernel(TensorIterator& iter, Scalar negval_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() { auto negval = negval_.to<scalar_t>(); gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t { return a > scalar_t(0) ? a : a * negval; }); }); } void leaky_relu_backward_kernel(TensorIterator& iter, Scalar negval_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() { auto negval = negval_.to<scalar_t>(); gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a > scalar_t(0) ? 
b : b * negval; }); }); } void hardswish_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC one_sixth(1.0f / 6.0f); const T_ACC three(3.0f); const T_ACC six(6.0f); gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t { T_ACC x = static_cast<T_ACC>(self_val); return x * ::min(::max(x + three, zero), six) * one_sixth; }); }); } void hardswish_backward_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC three(3.0f); const T_ACC neg_three(-3.0f); const T_ACC one_half(0.5f); gpu_kernel( iter, [zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t { T_ACC grad_val = static_cast<T_ACC>(grad_val_); T_ACC self_val = static_cast<T_ACC>(self_val_); if (self_val < neg_three) { return zero; } else if (self_val <= three) { return grad_val * ((self_val / three) + one_half); } else { return grad_val; } }); }); } void hardsigmoid_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC one_sixth(1.0f / 6.0f); const T_ACC three(3.0f); const T_ACC six(6.0f); gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t { T_ACC x = static_cast<T_ACC>(self_val); return ::min(::max(x + three, zero), six) * one_sixth; }); }); } void hardsigmoid_backward_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_backward_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC three(3.0f); const T_ACC neg_three(-3.0f); const T_ACC one_sixth(1.0f / 6.0f); gpu_kernel( iter, [zero, three, neg_three, one_sixth]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t { T_ACC grad_val = static_cast<T_ACC>(grad_val_); T_ACC self_val = static_cast<T_ACC>(self_val_); return (self_val >= neg_three && self_val <= three) ? 
grad_val * one_sixth : zero; }); }); } void silu_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "silu_cuda", [&]() { gpu_kernel( iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t { using T_ACC = acc_type<scalar_t, true>; const T_ACC x_acc = static_cast<T_ACC>(x); return x_acc / (T_ACC(1) + c10::hip::compat::exp(-x_acc)); }); }); } void silu_backward_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "silu_backward_cuda", [&]() { gpu_kernel( iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t { using T_ACC = acc_type<scalar_t, true>; const T_ACC dy_acc = static_cast<T_ACC>(dy); const T_ACC x_acc = static_cast<T_ACC>(x); const T_ACC s_acc = T_ACC(1) / (T_ACC(1) + c10::hip::compat::exp(-x_acc)); return dy_acc * s_acc * (T_ACC(1) + x_acc * (T_ACC(1) - s_acc)); }); }); } } // namespace Tensor gelu_cuda(const Tensor& self) { Tensor Y = at::native::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto it = TensorIterator::unary_op(Y, self); GeluCUDAKernelImpl(it); return Y; } Tensor gelu_backward_cuda(const Tensor& grad, const Tensor& self) { Tensor dX = at::native::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto it = TensorIterator::binary_op(dX, grad, self); GeluBackwardCUDAKernelImpl(it); return dX; } // computes `result = self <= threshold ? value : other` // other is `self` in threshold() and `grad` in threshold_backward() static Tensor threshold_out_cuda( optional<Tensor> opt_result, const Tensor& self, Scalar threshold, Scalar value, const Tensor& other) { Tensor result = opt_result.value_or(Tensor()); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) // threshold is idempotent, so overlap is okay .add_output(result) .add_input(self) .add_input(other) .allow_cpu_scalars(true) .promote_inputs_to_common_dtype(true) .cast_common_dtype_to_outputs(true) .enforce_safe_casting_to_output(true) .build(); threshold_kernel(iter, threshold, value); return iter.output(); } Tensor threshold_cuda(const Tensor& self, Scalar threshold, Scalar value) { return threshold_out_cuda(nullopt, self, threshold, value, self); } Tensor& threshold__cuda(Tensor& self, Scalar threshold, Scalar value) { threshold_out_cuda(make_optional(self), self, threshold, value, self); return self; } Tensor& threshold_out_cuda(Tensor& result, const Tensor& self, Scalar threshold, Scalar value) { threshold_out_cuda(make_optional(result), self, threshold, value, self); return result; } Tensor threshold_backward_cuda(const Tensor& grad, const Tensor& self, Scalar threshold) { return threshold_out_cuda(nullopt, self, threshold, 0, grad); } REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel); REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel); REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel); REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel); REGISTER_DISPATCH(elu_stub, &elu_kernel); REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel); REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel); REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel); REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel); REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel); REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel); REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel); REGISTER_DISPATCH(softplus_stub, &softplus_kernel); REGISTER_DISPATCH(softplus_backward_stub, 
&softplus_backward_kernel); REGISTER_DISPATCH(silu_stub, &silu_kernel); REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel); } // namespace native } // namespace at
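The HIP translation above and the original Activation.cu that follows differ only by mechanical renames: cudaGetDevice becomes hipGetDevice, at::cuda::getCurrentCUDAStream becomes at::hip::getCurrentHIPStreamMasqueradingAsCUDA, C10_CUDA_KERNEL_LAUNCH_CHECK becomes C10_HIP_KERNEL_LAUNCH_CHECK, c10::cuda::compat becomes c10::hip::compat, std::exp/std::min become ::exp/::min, and every triple-chevron launch is rewritten to hipLaunchKernelGGL. The snippet below is an illustrative, self-contained CUDA sketch of that launch pattern (it is not part of either file); the comments show the form hipify emits for each call.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale_kernel(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;                    // one thread per element, bounds-checked
}

int main() {
  const int n = 1 << 20;
  float* d_data = nullptr;
  cudaMalloc(&d_data, n * sizeof(float));          // hipify: hipMalloc(&d_data, ...)
  cudaMemset(d_data, 0, n * sizeof(float));        // hipify: hipMemset(d_data, 0, ...)
  int curDevice = -1;
  cudaGetDevice(&curDevice);                       // hipify: hipGetDevice(&curDevice)
  cudaStream_t stream = 0;                         // hipify: hipStream_t stream = 0
  const dim3 block(256);
  const dim3 grid((n + block.x - 1) / block.x);
  scale_kernel<<<grid, block, 0, stream>>>(d_data, 2.0f, n);
  // hipify: hipLaunchKernelGGL(scale_kernel, grid, block, 0, stream, d_data, 2.0f, n);
  cudaDeviceSynchronize();                         // hipify: hipDeviceSynchronize()
  cudaFree(d_data);                                // hipify: hipFree(d_data)
  printf("ran on device %d\n", curDevice);
  return 0;
}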
Activation.cu
#define _USE_MATH_DEFINES #include <ATen/native/Activation.h> #include <cmath> #include <thrust/tuple.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/core/Array.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/native/cuda/Loops.cuh> #include <c10/cuda/CUDAMathCompat.h> namespace at { namespace native { // ----------------------------------- // prelu forward // ----------------------------------- template <typename scalar_t> void prelu_cuda_kernel_share_weights( const Tensor& input, Tensor& result, const scalar_t* weight_data) { auto iter = TensorIterator::unary_op(result, input); at::native::gpu_kernel(iter, [weight_data] GPU_LAMBDA (scalar_t input_val) { return (input_val > 0) ? input_val : *weight_data * input_val; }); } template <typename scalar_t> __global__ void prelu_cuda_kernel_multi_weights( scalar_t* result_data, const scalar_t* input_data, const scalar_t* weight_data, int64_t input_stride0, int64_t input_stride1, int64_t input_numel) { int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x; if (linearId >= input_numel) return; // multiply values at each channel with weight[channel_index] int64_t channel = (linearId % input_stride0) / input_stride1; scalar_t input_data_val = input_data[linearId]; result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val; } Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) { TORCH_CHECK(self.is_cuda()); TORCH_CHECK(weight_.is_cuda()); auto input = self.contiguous(); auto weight = weight_.contiguous(); TORCH_CHECK(input.is_contiguous()); TORCH_CHECK(weight.is_contiguous()); int64_t weight_num = weight.numel(); Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto strides = input.strides(); // case1: shared weight for all channels if (weight_num == 1) { AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] { prelu_cuda_kernel_share_weights<scalar_t>( input, result, weight.data_ptr<scalar_t>()); }); } else { // case2: multiple weights, one for each channel int64_t input_ndim = input.dim(); TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor."); int64_t channel_size = 1; // channel_size default to 1 int64_t input_stride0 = 1, input_stride1 = 1; if (input_ndim > 1) { channel_size = input.size(1); // channel is the 2nd dim of input input_stride0 = strides[0]; input_stride1 = strides[1]; } TORCH_CHECK(channel_size == weight_num, "Mismatch of parameter numbers and input channel size. 
Found parameter numbers = ", weight_num, " and channel size = ", channel_size, "."); // config to run cuda kernel int64_t input_numel = input.numel(); const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel)); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice); TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions"); AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] { prelu_cuda_kernel_multi_weights<scalar_t> <<<grid, block, 0, stream>>>( result.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), weight.data_ptr<scalar_t>(), input_stride0, input_stride1, input_numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } return result; } // ----------------------------------- // prelu backward // ----------------------------------- template <typename scalar_t> void prelu_cuda_backward_kernel_share_weights( const Tensor& input, const Tensor& grad_out, Tensor& input_grad, Tensor& weight_grad_collector, const scalar_t* weight_data) { at::TensorIterator iter = TensorIteratorConfig() .add_output(input_grad) .add_output(weight_grad_collector) .add_input(input) .add_input(grad_out) .build(); // N.B. `std::tuple` does not support `::operator=` on device code. gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t input, scalar_t grad_out) -> thrust::tuple<scalar_t, scalar_t> { scalar_t input_grad = input > 0 ? grad_out : (*weight_data) * grad_out; scalar_t weight_grad_collector = input > 0 ? scalar_t(0) : input * grad_out; return {input_grad, weight_grad_collector}; }); } template <typename scalar_t> __global__ void prelu_cuda_backward_kernel_multi_weights( const scalar_t* input_data, const scalar_t* weight_data, const scalar_t* grad_out_data, scalar_t* input_grad_data, scalar_t* weight_grad_collector, int64_t input_stride0, int64_t input_stride1, int64_t input_numel) { int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x; if (linearId >= input_numel) return; int64_t channel = (linearId % input_stride0) / input_stride1; scalar_t input_data_val = input_data[linearId]; scalar_t grad_out_data_val = grad_out_data[linearId]; input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val; weight_grad_collector[linearId] = (input_data_val > 0) ? 
scalar_t(0) : input_data_val * grad_out_data_val; } std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) { TORCH_CHECK(grad_out_.is_cuda()); TORCH_CHECK(self.is_cuda()); TORCH_CHECK(weight_.is_cuda()); auto input = self.contiguous(); auto grad_out = grad_out_.contiguous(); auto weight = weight_.contiguous(); TORCH_CHECK(input.is_contiguous()); TORCH_CHECK(weight.is_contiguous()); TORCH_CHECK(grad_out.is_contiguous()); int64_t weight_num = weight.numel(); auto strides = input.strides(); auto dims = input.dim(); Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); // case1: shared parameter for all channels if (weight_num == 1) { AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] { prelu_cuda_backward_kernel_share_weights<scalar_t>( input, grad_out, input_grad, weight_grad_collector, weight.data_ptr<scalar_t>()); }); weight_grad.fill_(weight_grad_collector.sum()); } else { // case2: multiple parameters, one for each channel int64_t input_ndim = input.dim(); TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor."); int64_t channel_size = 1; // channel_size default to 1 int64_t input_stride0 = 1, input_stride1 = 1; if (input_ndim > 1) { channel_size = input.size(1); // channel is the 2nd dim of input input_stride0 = strides[0]; input_stride1 = strides[1]; } TORCH_CHECK(channel_size == weight_num, "Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num, " and channel size = ", channel_size, "."); // config to run cuda kernel int64_t input_numel = input.numel(); const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel)); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice); TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions"); AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] { prelu_cuda_backward_kernel_multi_weights<scalar_t> <<<grid, block, 0, stream>>>( input.data_ptr<scalar_t>(), weight.data_ptr<scalar_t>(), grad_out.data_ptr<scalar_t>(), input_grad.data_ptr<scalar_t>(), weight_grad_collector.data_ptr<scalar_t>(), input_stride0, input_stride1, input_numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); // update weight_grad std::vector<int64_t> reduce_dims; reduce_dims.push_back(0); if (dims > 2) { for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i); } weight_grad = weight_grad_collector.sum(reduce_dims); } return std::tuple<Tensor, Tensor>{input_grad, weight_grad}; } // ----------------------------------- // hardshrink // ----------------------------------- void hardshrink_kernel(TensorIterator& iter, Scalar value) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() { auto lambd = value.to<scalar_t>(); gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t { return (a >= -lambd && a <= lambd) ? 
scalar_t(0) : a; }); }); } void softshrink_kernel(TensorIterator& iter, Scalar value) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() { auto lambd = value.to<scalar_t>(); gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t { return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0)); }); }); } void shrink_backward_kernel(TensorIterator& iter, Scalar value) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() { auto lambd = value.to<scalar_t>(); gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t { return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val; }); }); } void hardtanh_backward_kernel(TensorIterator& iter, Scalar min, Scalar max) { AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() { auto min_val = min.to<scalar_t>(); auto max_val = max.to<scalar_t>(); gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a; }); }); } void softplus_kernel(TensorIterator& iter, Scalar beta_, Scalar threshold_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() { auto beta = beta_.to<scalar_t>(); auto threshold = threshold_.to<scalar_t>(); gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t { return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(std::exp(a * beta))) / beta; }); }); } void softplus_backward_kernel(TensorIterator& iter, Scalar beta_, Scalar threshold_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() { auto beta = beta_.to<scalar_t>(); auto threshold = threshold_.to<scalar_t>(); gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { scalar_t z = std::exp(b * beta); return (b * beta) > threshold ? a : a * (z - scalar_t(1.)) / z; }); }); } template <typename scalar_t> void threshold_kernel_impl(TensorIterator& iter, scalar_t threshold, scalar_t value) { gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t { return x <= threshold ? value : other; }); } static void threshold_kernel(TensorIterator& iter, Scalar threshold, Scalar value) { AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] { threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>()); }); } void elu_kernel(TensorIterator& iter, Scalar alpha, Scalar scale, Scalar input_scale) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() { auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>(); auto poscoef = scale.to<scalar_t>(); auto negiptcoef = input_scale.to<scalar_t>(); gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t { return a > scalar_t(0) ? 
a * poscoef : (static_cast<scalar_t>(std::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef; }); }); } void elu_backward_kernel(TensorIterator& iter, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() { auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>(); auto poscoef = scale.to<scalar_t>(); auto negiptcoef = input_scale.to<scalar_t>(); gpu_kernel(iter, [negcoef, poscoef, negiptcoef, is_result]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { if (is_result) { return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef; } else { return b <= scalar_t(0) ? a * negiptcoef * negcoef * (static_cast<scalar_t>(std::exp(b * negiptcoef))) : a * poscoef; } }); }); } namespace { void GeluCUDAKernelImpl(TensorIterator& it) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() { using T_ACC = acc_type<scalar_t, true>; gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t { return static_cast<T_ACC>(x) * c10::cuda::compat::normcdf(static_cast<T_ACC>(x)); }); }); } void GeluBackwardCUDAKernelImpl(TensorIterator& it) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() { using T_ACC = acc_type<scalar_t, true>; gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t { constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5); const T_ACC cdf = c10::cuda::compat::normcdf(static_cast<T_ACC>(x)); const T_ACC pdf = c10::cuda::compat::exp( T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) * kBeta; return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf); }); }); } void leaky_relu_kernel(TensorIterator& iter, Scalar negval_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() { auto negval = negval_.to<scalar_t>(); gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t { return a > scalar_t(0) ? a : a * negval; }); }); } void leaky_relu_backward_kernel(TensorIterator& iter, Scalar negval_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() { auto negval = negval_.to<scalar_t>(); gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a > scalar_t(0) ? 
b : b * negval; }); }); } void hardswish_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC one_sixth(1.0f / 6.0f); const T_ACC three(3.0f); const T_ACC six(6.0f); gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t { T_ACC x = static_cast<T_ACC>(self_val); return x * std::min(std::max(x + three, zero), six) * one_sixth; }); }); } void hardswish_backward_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC three(3.0f); const T_ACC neg_three(-3.0f); const T_ACC one_half(0.5f); gpu_kernel( iter, [zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t { T_ACC grad_val = static_cast<T_ACC>(grad_val_); T_ACC self_val = static_cast<T_ACC>(self_val_); if (self_val < neg_three) { return zero; } else if (self_val <= three) { return grad_val * ((self_val / three) + one_half); } else { return grad_val; } }); }); } void hardsigmoid_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC one_sixth(1.0f / 6.0f); const T_ACC three(3.0f); const T_ACC six(6.0f); gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t { T_ACC x = static_cast<T_ACC>(self_val); return std::min(std::max(x + three, zero), six) * one_sixth; }); }); } void hardsigmoid_backward_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_backward_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC three(3.0f); const T_ACC neg_three(-3.0f); const T_ACC one_sixth(1.0f / 6.0f); gpu_kernel( iter, [zero, three, neg_three, one_sixth]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t { T_ACC grad_val = static_cast<T_ACC>(grad_val_); T_ACC self_val = static_cast<T_ACC>(self_val_); return (self_val >= neg_three && self_val <= three) ? 
grad_val * one_sixth : zero; }); }); } void silu_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "silu_cuda", [&]() { gpu_kernel( iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t { using T_ACC = acc_type<scalar_t, true>; const T_ACC x_acc = static_cast<T_ACC>(x); return x_acc / (T_ACC(1) + c10::cuda::compat::exp(-x_acc)); }); }); } void silu_backward_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "silu_backward_cuda", [&]() { gpu_kernel( iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t { using T_ACC = acc_type<scalar_t, true>; const T_ACC dy_acc = static_cast<T_ACC>(dy); const T_ACC x_acc = static_cast<T_ACC>(x); const T_ACC s_acc = T_ACC(1) / (T_ACC(1) + c10::cuda::compat::exp(-x_acc)); return dy_acc * s_acc * (T_ACC(1) + x_acc * (T_ACC(1) - s_acc)); }); }); } } // namespace Tensor gelu_cuda(const Tensor& self) { Tensor Y = at::native::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto it = TensorIterator::unary_op(Y, self); GeluCUDAKernelImpl(it); return Y; } Tensor gelu_backward_cuda(const Tensor& grad, const Tensor& self) { Tensor dX = at::native::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto it = TensorIterator::binary_op(dX, grad, self); GeluBackwardCUDAKernelImpl(it); return dX; } // computes `result = self <= threshold ? value : other` // other is `self` in threshold() and `grad` in threshold_backward() static Tensor threshold_out_cuda( optional<Tensor> opt_result, const Tensor& self, Scalar threshold, Scalar value, const Tensor& other) { Tensor result = opt_result.value_or(Tensor()); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) // threshold is idempotent, so overlap is okay .add_output(result) .add_input(self) .add_input(other) .allow_cpu_scalars(true) .promote_inputs_to_common_dtype(true) .cast_common_dtype_to_outputs(true) .enforce_safe_casting_to_output(true) .build(); threshold_kernel(iter, threshold, value); return iter.output(); } Tensor threshold_cuda(const Tensor& self, Scalar threshold, Scalar value) { return threshold_out_cuda(nullopt, self, threshold, value, self); } Tensor& threshold__cuda(Tensor& self, Scalar threshold, Scalar value) { threshold_out_cuda(make_optional(self), self, threshold, value, self); return self; } Tensor& threshold_out_cuda(Tensor& result, const Tensor& self, Scalar threshold, Scalar value) { threshold_out_cuda(make_optional(result), self, threshold, value, self); return result; } Tensor threshold_backward_cuda(const Tensor& grad, const Tensor& self, Scalar threshold) { return threshold_out_cuda(nullopt, self, threshold, 0, grad); } REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel); REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel); REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel); REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel); REGISTER_DISPATCH(elu_stub, &elu_kernel); REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel); REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel); REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel); REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel); REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel); REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel); REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel); REGISTER_DISPATCH(softplus_stub, &softplus_kernel); REGISTER_DISPATCH(softplus_backward_stub, 
&softplus_backward_kernel); REGISTER_DISPATCH(silu_stub, &silu_kernel); REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel); } // namespace native } // namespace at
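Both prelu_cuda and prelu_backward_cuda above recover the channel of a flat index with (linearId % input_stride0) / input_stride1. This works because the input has been made contiguous first: for an N x C x H x W tensor, stride0 = C*H*W and stride1 = H*W, so the remainder drops the batch index and the division drops the spatial offset. Below is a minimal, self-contained CUDA sketch of the same indexing outside the ATen machinery; names and sizes are illustrative, not taken from the original.

#include <cuda_runtime.h>

// PReLU-style elementwise kernel: one weight per channel of a contiguous NCHW tensor.
__global__ void prelu_like_kernel(float* out, const float* in, const float* weight,
                                  long long stride0, long long stride1, long long numel) {
  long long i = blockIdx.x * (long long)blockDim.x + threadIdx.x;
  if (i >= numel) return;
  long long channel = (i % stride0) / stride1;     // c from i = ((n*C + c)*H + h)*W + w
  float x = in[i];
  out[i] = x > 0.f ? x : weight[channel] * x;
}

int main() {
  const long long N = 2, C = 3, H = 8, W = 8;
  const long long numel = N * C * H * W;
  const long long stride0 = C * H * W;             // elements per sample
  const long long stride1 = H * W;                 // elements per channel
  float *d_in, *d_out, *d_w;
  cudaMalloc(&d_in, numel * sizeof(float));
  cudaMalloc(&d_out, numel * sizeof(float));
  cudaMalloc(&d_w, C * sizeof(float));
  // (fill d_in and d_w from the host as needed)
  const int threads = 256;
  const int blocks = (int)((numel + threads - 1) / threads);
  prelu_like_kernel<<<blocks, threads>>>(d_out, d_in, d_w, stride0, stride1, numel);
  cudaDeviceSynchronize();
  cudaFree(d_in); cudaFree(d_out); cudaFree(d_w);
  return 0;
}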
8b9a5ce49e157638d4cee8d3f5246fd7849f4c01.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hipfft.h> // CUDA FFT Libraries #include "defines.h" #include "gp_kernels.h" #include "chamber.hpp" // ***************************** // Useful functions //****************************** __device__ unsigned char clip(double x) {return x > 255 ? 255 : (x < 0 ? 0 : x); } __device__ uchar4 viridis(double value) { uchar4 result; result.x = clip(255 * ( 2.854 * pow(value, 3) - 2.098 * pow(value, 2) + 0.037 * value + 0.254)); result.y = clip(255 * (-0.176 * pow(value, 3) - 0.167 * pow(value, 2) + 1.243 * value + 0.016)); result.z = clip(255 * ( 0.261 * pow(value, 3) - 1.833 * pow(value, 2) + 1.275 * value + 0.309)); result.w = 255; return result; } __device__ uchar4 inferno(double value) { uchar4 result; result.x = clip(255 * (-1.760 * pow(value, 3) + 1.487 * pow(value, 2) + 1.223 * value - 0.034)); result.y = clip(255 * ( 0.284 * pow(value, 3) - 0.827 * pow(value, 2) - 0.086 * value + 0.026)); result.z = clip(255 * ( 7.533 * pow(value, 3) - 11.435 * pow(value, 2) + 4.603 * value - 0.096)); result.w = 255; return result; } //Round a / b to nearest higher integer value inline int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } //Align a to nearest higher multiple of b inline int iAlignUp(int a, int b) { return (a % b != 0) ? (a - a % b + b) : a; } // Calculate the magnitude squared of a complex number __host__ __device__ double complexMagnitudeSquared(hipDoubleComplex in){ return in.x*in.x + in.y*in.y; } // ***************************** // GPU Kernels //****************************** // Handles display mapping from hipDoubleComplex to uchar4 __global__ void display_psi(uchar4 *d_out, hipDoubleComplex *devPsi, double scale, int w, int h) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing double mag = complexMagnitudeSquared(devPsi[i]); d_out[i] = viridis(mag/scale); } void colormapKernelLauncher(uchar4 *d_out, hipDoubleComplex *devPsi, double scale, int w, int h) { const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); hipLaunchKernelGGL(( display_psi), dim3(gridSize), dim3(blockSize), 0, 0, d_out, devPsi, scale, w, h); } // Handles display mapping from hipDoubleComplex to RGBA int __global__ void movie_frame(int *d_buffer, hipDoubleComplex *devPsi, double scale, int w, int h) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing double mag = complexMagnitudeSquared(devPsi[i]); uchar4 d_out = viridis(mag/scale); d_buffer[i] = 65536 * d_out.z + 256 * d_out.y + d_out.x; } void movieFrameLauncher(int *d_buffer, hipDoubleComplex *devPsi, double scale, int w, int h) { const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); hipLaunchKernelGGL(( movie_frame), dim3(gridSize), dim3(blockSize), 0, 0, d_buffer, devPsi, scale, w, h); } // Multiply the wavefunction with a real-valued scalar __global__ void realmult_psi(hipDoubleComplex *devPsi, double mult, int w, int h) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in 
bounds const int i = tidx + tidy * w; // 1D indexing devPsi[i].x = devPsi[i].x * mult; devPsi[i].y = devPsi[i].y * mult; } void multKernelLauncher(hipDoubleComplex *devPsi, double mult, int w, int h){ const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); hipLaunchKernelGGL(( realmult_psi), dim3(gridSize), dim3(blockSize), 0, 0, devPsi, mult, w, h); } // Realspace evolution __global__ void realevolve_psi(hipDoubleComplex *devPsi, hipDoubleComplex *devExpPotential, hipDoubleComplex *out, double g, double dt, double useReal, double cooling, int w, int h){ const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing hipDoubleComplex tPotential = devExpPotential[i]; hipDoubleComplex tPsi = devPsi[i]; double gn = g * complexMagnitudeSquared(tPsi) * (dt / (2*HBAR)); hipDoubleComplex expgn; expgn.x = exp( -gn * cooling) * cos( -gn * useReal); expgn.y = exp( -gn * cooling) * sin( -gn * useReal); hipDoubleComplex realspaceUnitary = cuCmul(tPotential, expgn); out[i] = cuCmul(realspaceUnitary, tPsi); } void realspaceKernelLauncher(hipDoubleComplex *devPsi, hipDoubleComplex *devExpPotential, hipDoubleComplex *out, double g, double dt, double useReal, double cooling, int w, int h){ const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); hipLaunchKernelGGL(( realevolve_psi), dim3(gridSize), dim3(blockSize), 0, 0, devPsi, devExpPotential, out, g, dt, useReal, cooling, w, h); } // Time Varying Harmonic Potential __global__ void timevary_potential(hipDoubleComplex *devExpPotential, hipDoubleComplex *out, double *devXX, double *devYY, double mass, double o_0, double ep_0, double o, double ep, double dt, double useReal, double cooling, int w, int h) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing double coeff_x = 0.5 * mass * ((1-ep) * pow(o, 2) - (1-ep_0) * pow(o_0, 2)); double coeff_y = 0.5 * mass * ((1+ep) * pow(o, 2) - (1+ep_0) * pow(o_0, 2)); double potential = (coeff_x * devXX[i] + coeff_y * devYY[i]) * (dt / (2*HBAR)); hipDoubleComplex exp_potential; exp_potential.x = exp( -potential * cooling) * cos( -potential * useReal); exp_potential.y = exp( -potential * cooling) * sin( -potential * useReal); hipDoubleComplex exp_potential_0 = devExpPotential[i]; hipDoubleComplex exp_pot_dt; exp_pot_dt.x = exp_potential.x * exp_potential_0.x - exp_potential.y * exp_potential_0.y; exp_pot_dt.y = exp_potential.y * exp_potential_0.x + exp_potential.x * exp_potential_0.y; out[i] = exp_pot_dt; } void timevaryingKernelLauncher(hipDoubleComplex *devExpPotential, hipDoubleComplex *out, double *devXX, double *devYY, double mass, double o_0, double ep_0, double o, double ep, double dt, double useReal, double cooling, int w, int h) { const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); hipLaunchKernelGGL(( timevary_potential), dim3(gridSize), dim3(blockSize), 0, 0, devExpPotential, out, devXX, devYY, mass, o_0, ep_0, o, ep, dt, useReal, cooling, w, h); } // Momentum Space evolution __global__ void momentumevolve_psi(hipDoubleComplex *devPsi, hipDoubleComplex *devExpKinetic, hipDoubleComplex *out, int w, int h){ const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = 
blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing hipDoubleComplex tKinetic = devExpKinetic[i]; hipDoubleComplex tPsi = devPsi[i]; out[i] = cuCmul(tKinetic, tPsi); } void momentumspaceKernelLauncher(hipDoubleComplex *devPsi, hipDoubleComplex *devExpKinetic, hipDoubleComplex *out, int w, int h){ const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); hipLaunchKernelGGL(( momentumevolve_psi), dim3(gridSize), dim3(blockSize), 0, 0, devPsi, devExpKinetic, out, w, h); } // Realspace evolution __global__ void gaugefield(double omegaR, double *devXkY, double *devYkX, hipDoubleComplex *devExpXkY, hipDoubleComplex *devExpYkX, double dt, double useReal, double cooling, int w, int h){ const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing devExpXkY[i].x = exp( -omegaR * devXkY[i] * cooling * dt) * cos( -omegaR * devXkY[i] * useReal * dt); devExpXkY[i].y = exp( -omegaR * devXkY[i] * cooling * dt) * sin( -omegaR * devXkY[i] * useReal * dt); devExpYkX[i].x = exp( -omegaR * devYkX[i] * cooling * dt) * cos( -omegaR * devYkX[i] * useReal * dt); devExpYkX[i].y = exp( -omegaR * devYkX[i] * cooling * dt) * sin( -omegaR * devYkX[i] * useReal * dt); } void gaugefieldKernelLauncher(double omegaR, double *devXkY, double *devYkX, hipDoubleComplex *devExpXkY, hipDoubleComplex *devExpYkX, double dt, double useReal, double cooling, int w, int h){ const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); hipLaunchKernelGGL(( gaugefield), dim3(gridSize), dim3(blockSize), 0, 0, omegaR, devXkY, devYkX, devExpXkY, devExpYkX, dt, useReal, cooling, w, h); } // Density psi __global__ void density_psi(hipDoubleComplex *devPsi, double *density, int w, int h) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing density[i] = complexMagnitudeSquared(devPsi[i]); } //Normalization __global__ void scalarDiv_wfcNorm(hipDoubleComplex *in, double dr, double* pSum, hipDoubleComplex *out, int w, int h){ const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing hipDoubleComplex result; double norm = sqrt((pSum[0])*dr); result.x = (in[i].x/norm); result.y = (in[i].y/norm); out[i] = result; } /** * Routine for parallel summation. Can be looped over from host. 
* From GPUE-group (https://github.com/GPUE-group/GPUE) */ __global__ void multipass(double* input, double* output){ unsigned int tid = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z * blockDim.x * blockDim.y; unsigned int bid = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; unsigned int gid = bid * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.y * blockDim.x) + (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x; extern __shared__ double sdatad[]; sdatad[tid] = input[gid]; __syncthreads(); for(int i = blockDim.x>>1; i > 0; i>>=1){ if(tid < i){ sdatad[tid] += sdatad[tid + i]; } __syncthreads(); } if(tid==0){ output[bid] = sdatad[0]; } } /* * General-purpose summation of an array on the gpu, storing the result in the first element */ void gpuReduce(double* data, int length, int threadCount) { dim3 block(length / threadCount, 1, 1); dim3 threads(threadCount, 1, 1); while((double)length/threadCount > 1.0){ hipLaunchKernelGGL(( multipass), dim3(block),dim3(threads),threadCount*sizeof(double), 0, &data[0], &data[0]); length /= threadCount; block = (int) ceil((double)length/threadCount); } hipLaunchKernelGGL(( multipass), dim3(1),dim3(length),threadCount*sizeof(double), 0, &data[0], &data[0]); } void parSum(hipDoubleComplex *devPsi, double *density, double dx, int w, int h){ int DS = w * h; double dg = dx * dx; dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); dim3 grid_tmp(DS, 1, 1); dim3 threads(TILEX, TILEY); dim3 block(grid_tmp.x/threads.x, 1, 1); hipLaunchKernelGGL(( density_psi), dim3(gridSize), dim3(threads), 0, 0, devPsi, density, w, h); gpuReduce(density, grid_tmp.x, threads.x); /* // Writing out in the parSum Function (not recommended, for debugging) double *sum; sum = (double *) malloc(sizeof(double)*gsize); hipMemcpy(sum,density,sizeof(double)*gsize, hipMemcpyDeviceToHost); std::cout << (sum[0]) << '\n'; */ hipLaunchKernelGGL(( scalarDiv_wfcNorm), dim3(gridSize), dim3(threads), 0, 0, devPsi, dg, density, devPsi, w, h); } // Spoon __global__ void spoonKernel(double *devPotential, hipDoubleComplex *devExpPotential, spoonProps spoonP, double dt, double useReal, double cooling, int w, int h){ const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing const int dist = (tidx - spoonP.pos.x) * (tidx - spoonP.pos.x) + (tidy - spoonP.pos.y) * (tidy - spoonP.pos.y); double spoon = spoonP.strength * exp( - pow(dist, 2) / pow(spoonP.radius, 2) ); double devPotSpoon = devPotential[i]; devPotSpoon += spoon; devExpPotential[i].x = exp( -devPotSpoon * cooling * dt/(2*HBAR)) * cos( -devPotSpoon * useReal * dt/(2*HBAR)); devExpPotential[i].y = exp( -devPotSpoon * cooling * dt/(2*HBAR)) * sin( -devPotSpoon * useReal * dt/(2*HBAR)); } void spoonKernelLauncher(double *devPotential, hipDoubleComplex *devExpPotential, spoonProps spoonP, double dt, double useReal, double cooling, int w, int h) { const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); hipLaunchKernelGGL(( spoonKernel), dim3(gridSize), dim3(blockSize), 0, 0, devPotential, devExpPotential, spoonP, dt, useReal, cooling, w, h); }
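parSum above normalizes the wavefunction in three stages: density_psi fills a buffer with |psi|^2, gpuReduce repeatedly collapses that buffer with the multipass shared-memory tree reduction until the total sits in element 0, and scalarDiv_wfcNorm divides psi by sqrt(sum * dx^2). The following is a self-contained CUDA sketch of the same block-level reduction, assuming a power-of-two block size and a length divisible by it; unlike gpuReduce it writes partial sums to a separate buffer rather than reducing strictly in place.

#include <cuda_runtime.h>
#include <cstdio>
#include <utility>

// One partial sum per block via a shared-memory tree reduction.
__global__ void block_sum(const double* in, double* out) {
  extern __shared__ double sdata[];
  unsigned tid = threadIdx.x;
  unsigned gid = blockIdx.x * blockDim.x + threadIdx.x;
  sdata[tid] = in[gid];
  __syncthreads();
  for (unsigned s = blockDim.x >> 1; s > 0; s >>= 1) {   // halve the active range each step
    if (tid < s) sdata[tid] += sdata[tid + s];
    __syncthreads();
  }
  if (tid == 0) out[blockIdx.x] = sdata[0];
}

int main() {
  const int threads = 256;
  int length = 1 << 16;                                  // must stay divisible by `threads`
  double *d_in = nullptr, *d_part = nullptr;
  cudaMalloc(&d_in, length * sizeof(double));
  cudaMalloc(&d_part, (length / threads) * sizeof(double));
  // (fill d_in with |psi|^2 samples, as density_psi does above)
  while (length > threads) {
    int blocks = length / threads;
    block_sum<<<blocks, threads, threads * sizeof(double)>>>(d_in, d_part);
    std::swap(d_in, d_part);                             // partial sums feed the next pass
    length = blocks;
  }
  block_sum<<<1, length, length * sizeof(double)>>>(d_in, d_in);  // d_in[0] now holds the total
  cudaDeviceSynchronize();
  double total = 0.0;
  cudaMemcpy(&total, d_in, sizeof(double), cudaMemcpyDeviceToHost);
  printf("sum = %f\n", total);
  cudaFree(d_in); cudaFree(d_part);
  return 0;
}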
8b9a5ce49e157638d4cee8d3f5246fd7849f4c01.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cufft.h> // CUDA FFT Libraries #include "defines.h" #include "gp_kernels.h" #include "chamber.hpp" // ***************************** // Useful functions //****************************** __device__ unsigned char clip(double x) {return x > 255 ? 255 : (x < 0 ? 0 : x); } __device__ uchar4 viridis(double value) { uchar4 result; result.x = clip(255 * ( 2.854 * pow(value, 3) - 2.098 * pow(value, 2) + 0.037 * value + 0.254)); result.y = clip(255 * (-0.176 * pow(value, 3) - 0.167 * pow(value, 2) + 1.243 * value + 0.016)); result.z = clip(255 * ( 0.261 * pow(value, 3) - 1.833 * pow(value, 2) + 1.275 * value + 0.309)); result.w = 255; return result; } __device__ uchar4 inferno(double value) { uchar4 result; result.x = clip(255 * (-1.760 * pow(value, 3) + 1.487 * pow(value, 2) + 1.223 * value - 0.034)); result.y = clip(255 * ( 0.284 * pow(value, 3) - 0.827 * pow(value, 2) - 0.086 * value + 0.026)); result.z = clip(255 * ( 7.533 * pow(value, 3) - 11.435 * pow(value, 2) + 4.603 * value - 0.096)); result.w = 255; return result; } //Round a / b to nearest higher integer value inline int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } //Align a to nearest higher multiple of b inline int iAlignUp(int a, int b) { return (a % b != 0) ? (a - a % b + b) : a; } // Calculate the magnitude squared of a complex number __host__ __device__ double complexMagnitudeSquared(cuDoubleComplex in){ return in.x*in.x + in.y*in.y; } // ***************************** // GPU Kernels //****************************** // Handles display mapping from cuDoubleComplex to uchar4 __global__ void display_psi(uchar4 *d_out, cuDoubleComplex *devPsi, double scale, int w, int h) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing double mag = complexMagnitudeSquared(devPsi[i]); d_out[i] = viridis(mag/scale); } void colormapKernelLauncher(uchar4 *d_out, cuDoubleComplex *devPsi, double scale, int w, int h) { const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); display_psi<<<gridSize, blockSize>>>(d_out, devPsi, scale, w, h); } // Handles display mapping from cuDoubleComplex to RGBA int __global__ void movie_frame(int *d_buffer, cuDoubleComplex *devPsi, double scale, int w, int h) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing double mag = complexMagnitudeSquared(devPsi[i]); uchar4 d_out = viridis(mag/scale); d_buffer[i] = 65536 * d_out.z + 256 * d_out.y + d_out.x; } void movieFrameLauncher(int *d_buffer, cuDoubleComplex *devPsi, double scale, int w, int h) { const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); movie_frame<<<gridSize, blockSize>>>(d_buffer, devPsi, scale, w, h); } // Multiply the wavefunction with a real-valued scalar __global__ void realmult_psi(cuDoubleComplex *devPsi, double mult, int w, int h) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing devPsi[i].x = devPsi[i].x * mult; devPsi[i].y = devPsi[i].y * mult; } void multKernelLauncher(cuDoubleComplex *devPsi, 
double mult, int w, int h){ const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); realmult_psi<<<gridSize, blockSize>>>(devPsi, mult, w, h); } // Realspace evolution __global__ void realevolve_psi(cuDoubleComplex *devPsi, cuDoubleComplex *devExpPotential, cuDoubleComplex *out, double g, double dt, double useReal, double cooling, int w, int h){ const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing cuDoubleComplex tPotential = devExpPotential[i]; cuDoubleComplex tPsi = devPsi[i]; double gn = g * complexMagnitudeSquared(tPsi) * (dt / (2*HBAR)); cuDoubleComplex expgn; expgn.x = exp( -gn * cooling) * cos( -gn * useReal); expgn.y = exp( -gn * cooling) * sin( -gn * useReal); cuDoubleComplex realspaceUnitary = cuCmul(tPotential, expgn); out[i] = cuCmul(realspaceUnitary, tPsi); } void realspaceKernelLauncher(cuDoubleComplex *devPsi, cuDoubleComplex *devExpPotential, cuDoubleComplex *out, double g, double dt, double useReal, double cooling, int w, int h){ const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); realevolve_psi<<<gridSize, blockSize>>>(devPsi, devExpPotential, out, g, dt, useReal, cooling, w, h); } // Time Varying Harmonic Potential __global__ void timevary_potential(cuDoubleComplex *devExpPotential, cuDoubleComplex *out, double *devXX, double *devYY, double mass, double o_0, double ep_0, double o, double ep, double dt, double useReal, double cooling, int w, int h) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing double coeff_x = 0.5 * mass * ((1-ep) * pow(o, 2) - (1-ep_0) * pow(o_0, 2)); double coeff_y = 0.5 * mass * ((1+ep) * pow(o, 2) - (1+ep_0) * pow(o_0, 2)); double potential = (coeff_x * devXX[i] + coeff_y * devYY[i]) * (dt / (2*HBAR)); cuDoubleComplex exp_potential; exp_potential.x = exp( -potential * cooling) * cos( -potential * useReal); exp_potential.y = exp( -potential * cooling) * sin( -potential * useReal); cuDoubleComplex exp_potential_0 = devExpPotential[i]; cuDoubleComplex exp_pot_dt; exp_pot_dt.x = exp_potential.x * exp_potential_0.x - exp_potential.y * exp_potential_0.y; exp_pot_dt.y = exp_potential.y * exp_potential_0.x + exp_potential.x * exp_potential_0.y; out[i] = exp_pot_dt; } void timevaryingKernelLauncher(cuDoubleComplex *devExpPotential, cuDoubleComplex *out, double *devXX, double *devYY, double mass, double o_0, double ep_0, double o, double ep, double dt, double useReal, double cooling, int w, int h) { const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); timevary_potential<<<gridSize, blockSize>>>(devExpPotential, out, devXX, devYY, mass, o_0, ep_0, o, ep, dt, useReal, cooling, w, h); } // Momentum Space evolution __global__ void momentumevolve_psi(cuDoubleComplex *devPsi, cuDoubleComplex *devExpKinetic, cuDoubleComplex *out, int w, int h){ const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing cuDoubleComplex tKinetic = devExpKinetic[i]; cuDoubleComplex tPsi = devPsi[i]; out[i] = cuCmul(tKinetic, tPsi); } void momentumspaceKernelLauncher(cuDoubleComplex 
*devPsi, cuDoubleComplex *devExpKinetic, cuDoubleComplex *out, int w, int h){ const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); momentumevolve_psi<<<gridSize, blockSize>>>(devPsi, devExpKinetic, out, w, h); } // Realspace evolution __global__ void gaugefield(double omegaR, double *devXkY, double *devYkX, cuDoubleComplex *devExpXkY, cuDoubleComplex *devExpYkX, double dt, double useReal, double cooling, int w, int h){ const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing devExpXkY[i].x = exp( -omegaR * devXkY[i] * cooling * dt) * cos( -omegaR * devXkY[i] * useReal * dt); devExpXkY[i].y = exp( -omegaR * devXkY[i] * cooling * dt) * sin( -omegaR * devXkY[i] * useReal * dt); devExpYkX[i].x = exp( -omegaR * devYkX[i] * cooling * dt) * cos( -omegaR * devYkX[i] * useReal * dt); devExpYkX[i].y = exp( -omegaR * devYkX[i] * cooling * dt) * sin( -omegaR * devYkX[i] * useReal * dt); } void gaugefieldKernelLauncher(double omegaR, double *devXkY, double *devYkX, cuDoubleComplex *devExpXkY, cuDoubleComplex *devExpYkX, double dt, double useReal, double cooling, int w, int h){ const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); gaugefield<<<gridSize, blockSize>>>(omegaR, devXkY, devYkX, devExpXkY, devExpYkX, dt, useReal, cooling, w, h); } // Density psi __global__ void density_psi(cuDoubleComplex *devPsi, double *density, int w, int h) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing density[i] = complexMagnitudeSquared(devPsi[i]); } //Normalization __global__ void scalarDiv_wfcNorm(cuDoubleComplex *in, double dr, double* pSum, cuDoubleComplex *out, int w, int h){ const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing cuDoubleComplex result; double norm = sqrt((pSum[0])*dr); result.x = (in[i].x/norm); result.y = (in[i].y/norm); out[i] = result; } /** * Routine for parallel summation. Can be looped over from host. 
* From GPUE-group (https://github.com/GPUE-group/GPUE) */ __global__ void multipass(double* input, double* output){ unsigned int tid = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z * blockDim.x * blockDim.y; unsigned int bid = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; unsigned int gid = bid * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.y * blockDim.x) + (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x; extern __shared__ double sdatad[]; sdatad[tid] = input[gid]; __syncthreads(); for(int i = blockDim.x>>1; i > 0; i>>=1){ if(tid < i){ sdatad[tid] += sdatad[tid + i]; } __syncthreads(); } if(tid==0){ output[bid] = sdatad[0]; } } /* * General-purpose summation of an array on the gpu, storing the result in the first element */ void gpuReduce(double* data, int length, int threadCount) { dim3 block(length / threadCount, 1, 1); dim3 threads(threadCount, 1, 1); while((double)length/threadCount > 1.0){ multipass<<<block,threads,threadCount*sizeof(double)>>>(&data[0], &data[0]); length /= threadCount; block = (int) ceil((double)length/threadCount); } multipass<<<1,length,threadCount*sizeof(double)>>>(&data[0], &data[0]); } void parSum(cuDoubleComplex *devPsi, double *density, double dx, int w, int h){ int DS = w * h; double dg = dx * dx; dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); dim3 grid_tmp(DS, 1, 1); dim3 threads(TILEX, TILEY); dim3 block(grid_tmp.x/threads.x, 1, 1); density_psi<<<gridSize, threads>>>(devPsi, density, w, h); gpuReduce(density, grid_tmp.x, threads.x); /* // Writing out in the parSum Function (not recommended, for debugging) double *sum; sum = (double *) malloc(sizeof(double)*gsize); cudaMemcpy(sum,density,sizeof(double)*gsize, cudaMemcpyDeviceToHost); std::cout << (sum[0]) << '\n'; */ scalarDiv_wfcNorm<<<gridSize, threads>>>(devPsi, dg, density, devPsi, w, h); } // Spoon __global__ void spoonKernel(double *devPotential, cuDoubleComplex *devExpPotential, spoonProps spoonP, double dt, double useReal, double cooling, int w, int h){ const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int tidy = blockIdx.y*blockDim.y + threadIdx.y; if ((tidx >= w) || (tidy >= h)) return; // Check if in bounds const int i = tidx + tidy * w; // 1D indexing const int dist = (tidx - spoonP.pos.x) * (tidx - spoonP.pos.x) + (tidy - spoonP.pos.y) * (tidy - spoonP.pos.y); double spoon = spoonP.strength * exp( - pow(dist, 2) / pow(spoonP.radius, 2) ); double devPotSpoon = devPotential[i]; devPotSpoon += spoon; devExpPotential[i].x = exp( -devPotSpoon * cooling * dt/(2*HBAR)) * cos( -devPotSpoon * useReal * dt/(2*HBAR)); devExpPotential[i].y = exp( -devPotSpoon * cooling * dt/(2*HBAR)) * sin( -devPotSpoon * useReal * dt/(2*HBAR)); } void spoonKernelLauncher(double *devPotential, cuDoubleComplex *devExpPotential, spoonProps spoonP, double dt, double useReal, double cooling, int w, int h) { const dim3 gridSize (iDivUp(w, TILEX), iDivUp(h, TILEY)); const dim3 blockSize(TILEX, TILEY); spoonKernel<<<gridSize, blockSize>>>(devPotential, devExpPotential, spoonP, dt, useReal, cooling, w, h); }
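The launchers in this file (realspaceKernelLauncher, momentumspaceKernelLauncher, multKernelLauncher) are the pieces of a split-step Fourier evolution; the host driver itself lives in chamber.hpp, which is included but not shown here. The sketch below is therefore a hypothetical wiring of those launchers with cuFFT, not the project's actual driver, and it assumes devExpPotential and devExpKinetic already hold the half-step potential and full-step kinetic phase factors.

#include <cufft.h>
#include <cuComplex.h>
#include "gp_kernels.h"   // assumed to declare the launchers defined above

// Hypothetical driver: half potential step, kinetic step in k-space, half potential step.
void split_step_evolve(cuDoubleComplex* devPsi, cuDoubleComplex* devExpPotential,
                       cuDoubleComplex* devExpKinetic, double g, double dt,
                       double useReal, double cooling, int w, int h, int steps) {
  cufftHandle plan;
  cufftPlan2d(&plan, h, w, CUFFT_Z2Z);                     // 2-D complex-to-complex plan
  for (int s = 0; s < steps; ++s) {
    // exp of the (potential + g|psi|^2) half step in real space
    realspaceKernelLauncher(devPsi, devExpPotential, devPsi, g, dt, useReal, cooling, w, h);
    cufftExecZ2Z(plan, devPsi, devPsi, CUFFT_FORWARD);     // to momentum space
    momentumspaceKernelLauncher(devPsi, devExpKinetic, devPsi, w, h);
    cufftExecZ2Z(plan, devPsi, devPsi, CUFFT_INVERSE);     // back to real space
    multKernelLauncher(devPsi, 1.0 / ((double)w * h), w, h);  // cuFFT leaves transforms unnormalized
    realspaceKernelLauncher(devPsi, devExpPotential, devPsi, g, dt, useReal, cooling, w, h);
  }
  cufftDestroy(plan);
}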
3441bde9a7e9dc86ef12f485c300276e730761a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zbajac_csr_overlap.cu, normal z -> s, Wed Jan 2 14:18:53 2019 */ #include "magmasparse_internal.h" #define PRECISION_s #define BLOCKSIZE 256 __global__ void magma_sk_testLocking(unsigned int* locks, int n) { int id = threadIdx.x % n; bool leaveLoop = false; while (!leaveLoop) { if (atomicExch(&(locks[id]), 1u) == 0u) { //critical section leaveLoop = true; atomicExch(&(locks[id]),0u); } } } /* __global__ void magma_sbajac_csr_o_ls_kernel(int localiters, int n, int matrices, int overlap, magma_s_matrix *D, magma_s_matrix *R, const float * __restrict__ b, float * x ) { // int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; // int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; __shared__ float local_x[ BLOCKSIZE ]; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; float *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; //valR = R[ (1+blockIdx.x-1)%matrices ].dval; //colR = R[ (1+blockIdx.x-1)%matrices ].dcol; //rowR = R[ (1+blockIdx.x-1)%matrices ].drow; //valD = D[ (1+blockIdx.x-1)%matrices ].dval; //colD = D[ (1+blockIdx.x-1)%matrices ].dcol; //rowD = D[ (1+blockIdx.x-1)%matrices ].drow; if (blockIdx.x%2 == 1) { valR = R[0].dval; valD = D[0].dval; colR = R[0].dcol; rowR = R[0].drow; colD = D[0].dcol; rowD = D[0].drow; } else { valR = R[1].dval; valD = D[1].dval; colR = R[1].dcol; rowR = R[1].drow; colD = D[1].dcol; rowD = D[1].drow; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; printf("bdx:%d idx:%d start:%d end:%d\n", blockIdx.x, threadIdx.x, start, end); #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for (i = start; i < end; i++) v += valR[i] * x[ colR[i] ]; start = rowD[index]; end = rowD[index+1]; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations local_x[threadIdx.x] = x[index]; //+ ( v - tmp); // / (valD[start]); __syncthreads(); #pragma unroll for (j = 0; j < localiters-1; j++) { tmp = zero; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if (threadIdx.x > overlap) { // RAS x[index] = local_x[threadIdx.x]; } } } */ __global__ void magma_sbajac_csr_o_ls_kernel1(int localiters, int n, int matrices, int overlap, float * valD, magma_index_t * rowD, magma_index_t * colD, float * valR, magma_index_t * rowR, magma_index_t * colR, const float * __restrict__ b, float * x ) { int inddiag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; //bool leaveLoop = false; if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl 
- v; // add more local iterations __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_sbajac_csr_o_ls_kernel2(int localiters, int n, int matrices, int overlap, float * valD0, magma_index_t * rowD0, magma_index_t * colD0, float * valR0, magma_index_t * rowR0, magma_index_t * colR0, float * valD1, magma_index_t * rowD1, magma_index_t * colD1, float * valR1, magma_index_t * rowR1, magma_index_t * colR1, const float * __restrict__ b, float * x ) { int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; //bool leaveLoop = false; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; float *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if (blockIdx.x%matrices == 0) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if (blockIdx.x%matrices == 1) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_sbajac_csr_o_ls_kernel4(int localiters, int n, int matrices, int overlap, float * valD0, magma_index_t * rowD0, magma_index_t * colD0, float * valR0, magma_index_t * rowR0, magma_index_t * colR0, float * valD1, magma_index_t * rowD1, magma_index_t * colD1, float * valR1, magma_index_t * rowR1, magma_index_t * colR1, float * valD2, magma_index_t * rowD2, magma_index_t * colD2, float * valR2, magma_index_t * rowR2, magma_index_t * colR2, float * valD3, magma_index_t * rowD3, magma_index_t * colD3, float * valR3, magma_index_t * rowR3, magma_index_t * colR3, const float * __restrict__ b, float * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; //bool leaveLoop = false; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; float *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==1 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==2 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = 
rowD1; }else if ( blockIdx.x%matrices==3 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_sbajac_csr_o_ls_kernel8(int localiters, int n, int matrices, int overlap, float * valD0, magma_index_t * rowD0, magma_index_t * colD0, float * valR0, magma_index_t * rowR0, magma_index_t * colR0, float * valD1, magma_index_t * rowD1, magma_index_t * colD1, float * valR1, magma_index_t * rowR1, magma_index_t * colR1, float * valD2, magma_index_t * rowD2, magma_index_t * colD2, float * valR2, magma_index_t * rowR2, magma_index_t * colR2, float * valD3, magma_index_t * rowD3, magma_index_t * colD3, float * valR3, magma_index_t * rowR3, magma_index_t * colR3, float * valD4, magma_index_t * rowD4, magma_index_t * colD4, float * valR4, magma_index_t * rowR4, magma_index_t * colR4, float * valD5, magma_index_t * rowD5, magma_index_t * colD5, float * valR5, magma_index_t * rowR5, magma_index_t * colR5, float * valD6, magma_index_t * rowD6, magma_index_t * colD6, float * valR6, magma_index_t * rowR6, magma_index_t * colR6, float * valD7, magma_index_t * rowD7, magma_index_t * colD7, float * valR7, magma_index_t * rowR7, magma_index_t * colR7, const float * __restrict__ b, float * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; float *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if( blockIdx.x%matrices==0 ){ valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }else if ( blockIdx.x%matrices==1 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }else if ( blockIdx.x%matrices==2 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }else if ( blockIdx.x%matrices==3 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; }else if ( blockIdx.x%matrices==4 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==5 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==6 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==7 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); 
#else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_sbajac_csr_o_ls_kernel16(int localiters, int n, int matrices, int overlap, float *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , float *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , float *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , float *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , float *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , float *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , float *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , float *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , float *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , float *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , float *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , float *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , float *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , float *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , float *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , float *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , float *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , float *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , float *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , float *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , float *valD10, magma_index_t *rowD10, magma_index_t *colD10, float *valR10, magma_index_t *rowR10, magma_index_t *colR10, float *valD11, magma_index_t *rowD11, magma_index_t *colD11, float *valR11, magma_index_t *rowR11, magma_index_t *colR11, float *valD12, magma_index_t *rowD12, magma_index_t *colD12, float *valR12, magma_index_t *rowR12, magma_index_t *colR12, float *valD13, magma_index_t *rowD13, magma_index_t *colD13, float *valR13, magma_index_t *rowR13, magma_index_t *colR13, float *valD14, magma_index_t *rowD14, magma_index_t *colD14, float *valR14, magma_index_t *rowR14, magma_index_t *colR14, float *valD15, magma_index_t *rowD15, magma_index_t *colD15, float *valR15, magma_index_t *rowR15, magma_index_t *colR15, const float * __restrict__ b, float * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; float *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==1 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==2 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==3 ) { valR = valR12; valD = valD12; colR = colR12; rowR = 
rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==4 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==5 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==6 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==7 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==8 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==9 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==10 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==11 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==12 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==13 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==14 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==15 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_sbajac_csr_o_ls_kernel32(int localiters, int n, int matrices, int overlap, float *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , float *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , float *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , float *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , float *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , float *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , float *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , float *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , float *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , float *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , float *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , float *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , float *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , float *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , float *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , float *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , float *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , float 
*valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , float *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , float *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , float *valD10, magma_index_t *rowD10, magma_index_t *colD10, float *valR10, magma_index_t *rowR10, magma_index_t *colR10, float *valD11, magma_index_t *rowD11, magma_index_t *colD11, float *valR11, magma_index_t *rowR11, magma_index_t *colR11, float *valD12, magma_index_t *rowD12, magma_index_t *colD12, float *valR12, magma_index_t *rowR12, magma_index_t *colR12, float *valD13, magma_index_t *rowD13, magma_index_t *colD13, float *valR13, magma_index_t *rowR13, magma_index_t *colR13, float *valD14, magma_index_t *rowD14, magma_index_t *colD14, float *valR14, magma_index_t *rowR14, magma_index_t *colR14, float *valD15, magma_index_t *rowD15, magma_index_t *colD15, float *valR15, magma_index_t *rowR15, magma_index_t *colR15, float *valD16, magma_index_t *rowD16, magma_index_t *colD16, float *valR16, magma_index_t *rowR16, magma_index_t *colR16, float *valD17, magma_index_t *rowD17, magma_index_t *colD17, float *valR17, magma_index_t *rowR17, magma_index_t *colR17, float *valD18, magma_index_t *rowD18, magma_index_t *colD18, float *valR18, magma_index_t *rowR18, magma_index_t *colR18, float *valD19, magma_index_t *rowD19, magma_index_t *colD19, float *valR19, magma_index_t *rowR19, magma_index_t *colR19, float *valD20, magma_index_t *rowD20, magma_index_t *colD20, float *valR20, magma_index_t *rowR20, magma_index_t *colR20, float *valD21, magma_index_t *rowD21, magma_index_t *colD21, float *valR21, magma_index_t *rowR21, magma_index_t *colR21, float *valD22, magma_index_t *rowD22, magma_index_t *colD22, float *valR22, magma_index_t *rowR22, magma_index_t *colR22, float *valD23, magma_index_t *rowD23, magma_index_t *colD23, float *valR23, magma_index_t *rowR23, magma_index_t *colR23, float *valD24, magma_index_t *rowD24, magma_index_t *colD24, float *valR24, magma_index_t *rowR24, magma_index_t *colR24, float *valD25, magma_index_t *rowD25, magma_index_t *colD25, float *valR25, magma_index_t *rowR25, magma_index_t *colR25, float *valD26, magma_index_t *rowD26, magma_index_t *colD26, float *valR26, magma_index_t *rowR26, magma_index_t *colR26, float *valD27, magma_index_t *rowD27, magma_index_t *colD27, float *valR27, magma_index_t *rowR27, magma_index_t *colR27, float *valD28, magma_index_t *rowD28, magma_index_t *colD28, float *valR28, magma_index_t *rowR28, magma_index_t *colR28, float *valD29, magma_index_t *rowD29, magma_index_t *colD29, float *valR29, magma_index_t *rowR29, magma_index_t *colR29, float *valD30, magma_index_t *rowD30, magma_index_t *colD30, float *valR30, magma_index_t *rowR30, magma_index_t *colR30, float *valD31, magma_index_t *rowD31, magma_index_t *colD31, float *valR31, magma_index_t *rowR31, magma_index_t *colR31, const float * __restrict__ b, float * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; float *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==1 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==2 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; 
rowD = rowD29; } else if ( blockIdx.x%matrices==3 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==4 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==5 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==6 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==7 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==8 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==9 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==10 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==11 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==12 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==13 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==14 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==15 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==16 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==17 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==18 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==19 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==20 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==21 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==22 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==23 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==24 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==25 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==26 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==27 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==28 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==29 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==30 ) { valR = valR1; valD = valD1; colR = colR1; 
rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==31 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_sbajac_csr_o_ls_kernel64(int localiters, int n, int matrices, int overlap, float *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , float *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , float *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , float *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , float *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , float *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , float *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , float *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , float *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , float *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , float *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , float *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , float *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , float *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , float *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , float *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , float *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , float *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , float *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , float *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , float *valD10, magma_index_t *rowD10, magma_index_t *colD10, float *valR10, magma_index_t *rowR10, magma_index_t *colR10, float *valD11, magma_index_t *rowD11, magma_index_t *colD11, float *valR11, magma_index_t *rowR11, magma_index_t *colR11, float *valD12, magma_index_t *rowD12, magma_index_t *colD12, float *valR12, magma_index_t *rowR12, magma_index_t *colR12, float *valD13, magma_index_t *rowD13, magma_index_t *colD13, float *valR13, magma_index_t *rowR13, magma_index_t *colR13, float *valD14, magma_index_t *rowD14, magma_index_t *colD14, float *valR14, magma_index_t *rowR14, magma_index_t *colR14, float *valD15, magma_index_t *rowD15, magma_index_t *colD15, float *valR15, magma_index_t *rowR15, magma_index_t *colR15, float *valD16, magma_index_t *rowD16, magma_index_t *colD16, float *valR16, magma_index_t *rowR16, magma_index_t *colR16, float *valD17, magma_index_t *rowD17, magma_index_t *colD17, float *valR17, magma_index_t *rowR17, magma_index_t *colR17, float *valD18, magma_index_t *rowD18, magma_index_t *colD18, float *valR18, magma_index_t *rowR18, magma_index_t *colR18, float *valD19, magma_index_t *rowD19, magma_index_t *colD19, float *valR19, magma_index_t 
*rowR19, magma_index_t *colR19, float *valD20, magma_index_t *rowD20, magma_index_t *colD20, float *valR20, magma_index_t *rowR20, magma_index_t *colR20, float *valD21, magma_index_t *rowD21, magma_index_t *colD21, float *valR21, magma_index_t *rowR21, magma_index_t *colR21, float *valD22, magma_index_t *rowD22, magma_index_t *colD22, float *valR22, magma_index_t *rowR22, magma_index_t *colR22, float *valD23, magma_index_t *rowD23, magma_index_t *colD23, float *valR23, magma_index_t *rowR23, magma_index_t *colR23, float *valD24, magma_index_t *rowD24, magma_index_t *colD24, float *valR24, magma_index_t *rowR24, magma_index_t *colR24, float *valD25, magma_index_t *rowD25, magma_index_t *colD25, float *valR25, magma_index_t *rowR25, magma_index_t *colR25, float *valD26, magma_index_t *rowD26, magma_index_t *colD26, float *valR26, magma_index_t *rowR26, magma_index_t *colR26, float *valD27, magma_index_t *rowD27, magma_index_t *colD27, float *valR27, magma_index_t *rowR27, magma_index_t *colR27, float *valD28, magma_index_t *rowD28, magma_index_t *colD28, float *valR28, magma_index_t *rowR28, magma_index_t *colR28, float *valD29, magma_index_t *rowD29, magma_index_t *colD29, float *valR29, magma_index_t *rowR29, magma_index_t *colR29, float *valD30, magma_index_t *rowD30, magma_index_t *colD30, float *valR30, magma_index_t *rowR30, magma_index_t *colR30, float *valD31, magma_index_t *rowD31, magma_index_t *colD31, float *valR31, magma_index_t *rowR31, magma_index_t *colR31, float *valD32, magma_index_t *rowD32, magma_index_t *colD32, float *valR32, magma_index_t *rowR32, magma_index_t *colR32, float *valD33, magma_index_t *rowD33, magma_index_t *colD33, float *valR33, magma_index_t *rowR33, magma_index_t *colR33, float *valD34, magma_index_t *rowD34, magma_index_t *colD34, float *valR34, magma_index_t *rowR34, magma_index_t *colR34, float *valD35, magma_index_t *rowD35, magma_index_t *colD35, float *valR35, magma_index_t *rowR35, magma_index_t *colR35, float *valD36, magma_index_t *rowD36, magma_index_t *colD36, float *valR36, magma_index_t *rowR36, magma_index_t *colR36, float *valD37, magma_index_t *rowD37, magma_index_t *colD37, float *valR37, magma_index_t *rowR37, magma_index_t *colR37, float *valD38, magma_index_t *rowD38, magma_index_t *colD38, float *valR38, magma_index_t *rowR38, magma_index_t *colR38, float *valD39, magma_index_t *rowD39, magma_index_t *colD39, float *valR39, magma_index_t *rowR39, magma_index_t *colR39, float *valD40, magma_index_t *rowD40, magma_index_t *colD40, float *valR40, magma_index_t *rowR40, magma_index_t *colR40, float *valD41, magma_index_t *rowD41, magma_index_t *colD41, float *valR41, magma_index_t *rowR41, magma_index_t *colR41, float *valD42, magma_index_t *rowD42, magma_index_t *colD42, float *valR42, magma_index_t *rowR42, magma_index_t *colR42, float *valD43, magma_index_t *rowD43, magma_index_t *colD43, float *valR43, magma_index_t *rowR43, magma_index_t *colR43, float *valD44, magma_index_t *rowD44, magma_index_t *colD44, float *valR44, magma_index_t *rowR44, magma_index_t *colR44, float *valD45, magma_index_t *rowD45, magma_index_t *colD45, float *valR45, magma_index_t *rowR45, magma_index_t *colR45, float *valD46, magma_index_t *rowD46, magma_index_t *colD46, float *valR46, magma_index_t *rowR46, magma_index_t *colR46, float *valD47, magma_index_t *rowD47, magma_index_t *colD47, float *valR47, magma_index_t *rowR47, magma_index_t *colR47, float *valD48, magma_index_t *rowD48, magma_index_t *colD48, float *valR48, magma_index_t *rowR48, 
magma_index_t *colR48, float *valD49, magma_index_t *rowD49, magma_index_t *colD49, float *valR49, magma_index_t *rowR49, magma_index_t *colR49, float *valD50, magma_index_t *rowD50, magma_index_t *colD50, float *valR50, magma_index_t *rowR50, magma_index_t *colR50, float *valD51, magma_index_t *rowD51, magma_index_t *colD51, float *valR51, magma_index_t *rowR51, magma_index_t *colR51, float *valD52, magma_index_t *rowD52, magma_index_t *colD52, float *valR52, magma_index_t *rowR52, magma_index_t *colR52, float *valD53, magma_index_t *rowD53, magma_index_t *colD53, float *valR53, magma_index_t *rowR53, magma_index_t *colR53, float *valD54, magma_index_t *rowD54, magma_index_t *colD54, float *valR54, magma_index_t *rowR54, magma_index_t *colR54, float *valD55, magma_index_t *rowD55, magma_index_t *colD55, float *valR55, magma_index_t *rowR55, magma_index_t *colR55, float *valD56, magma_index_t *rowD56, magma_index_t *colD56, float *valR56, magma_index_t *rowR56, magma_index_t *colR56, float *valD57, magma_index_t *rowD57, magma_index_t *colD57, float *valR57, magma_index_t *rowR57, magma_index_t *colR57, float *valD58, magma_index_t *rowD58, magma_index_t *colD58, float *valR58, magma_index_t *rowR58, magma_index_t *colR58, float *valD59, magma_index_t *rowD59, magma_index_t *colD59, float *valR59, magma_index_t *rowR59, magma_index_t *colR59, float *valD60, magma_index_t *rowD60, magma_index_t *colD60, float *valR60, magma_index_t *rowR60, magma_index_t *colR60, float *valD61, magma_index_t *rowD61, magma_index_t *colD61, float *valR61, magma_index_t *rowR61, magma_index_t *colR61, float *valD62, magma_index_t *rowD62, magma_index_t *colD62, float *valR62, magma_index_t *rowR62, magma_index_t *colR62, float *valD63, magma_index_t *rowD63, magma_index_t *colD63, float *valR63, magma_index_t *rowR63, magma_index_t *colR63, const float * __restrict__ b, float * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; float *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR63; valD = valD63; colR = colR63; rowR = rowR63; colD = colD63; rowD = rowD63; } else if ( blockIdx.x%matrices==1 ) { valR = valR62; valD = valD62; colR = colR62; rowR = rowR62; colD = colD62; rowD = rowD62; } else if ( blockIdx.x%matrices==2 ) { valR = valR61; valD = valD61; colR = colR61; rowR = rowR61; colD = colD61; rowD = rowD61; } else if ( blockIdx.x%matrices==3 ) { valR = valR60; valD = valD60; colR = colR60; rowR = rowR60; colD = colD60; rowD = rowD60; } else if ( blockIdx.x%matrices==4 ) { valR = valR59; valD = valD59; colR = colR59; rowR = rowR59; colD = colD59; rowD = rowD59; } else if ( blockIdx.x%matrices==5 ) { valR = valR58; valD = valD58; colR = colR58; rowR = rowR58; colD = colD58; rowD = rowD58; } else if ( blockIdx.x%matrices==6 ) { valR = valR57; valD = valD57; colR = colR57; rowR = rowR57; colD = colD57; rowD = rowD57; } else if ( blockIdx.x%matrices==7 ) { valR = valR56; valD = valD56; colR = colR56; rowR = rowR56; colD = colD56; rowD = rowD56; } else if ( blockIdx.x%matrices==8 ) { valR = valR55; valD = valD55; colR = colR55; rowR = rowR55; colD = colD55; rowD = rowD55; } else if ( blockIdx.x%matrices==9 ) { valR = valR54; valD = valD54; colR = colR54; rowR = rowR54; colD = colD54; rowD = rowD54; } else if ( blockIdx.x%matrices==10 ) { valR = valR53; valD = valD53; colR = colR53; rowR = 
rowR53; colD = colD53; rowD = rowD53; } else if ( blockIdx.x%matrices==11 ) { valR = valR52; valD = valD52; colR = colR52; rowR = rowR52; colD = colD52; rowD = rowD52; } else if ( blockIdx.x%matrices==12 ) { valR = valR51; valD = valD51; colR = colR51; rowR = rowR51; colD = colD51; rowD = rowD51; } else if ( blockIdx.x%matrices==13 ) { valR = valR50; valD = valD50; colR = colR50; rowR = rowR50; colD = colD50; rowD = rowD50; } else if ( blockIdx.x%matrices==14 ) { valR = valR49; valD = valD49; colR = colR49; rowR = rowR49; colD = colD49; rowD = rowD49; } else if ( blockIdx.x%matrices==15 ) { valR = valR48; valD = valD48; colR = colR48; rowR = rowR48; colD = colD48; rowD = rowD48; } else if ( blockIdx.x%matrices==16 ) { valR = valR47; valD = valD47; colR = colR47; rowR = rowR47; colD = colD47; rowD = rowD47; } else if ( blockIdx.x%matrices==17 ) { valR = valR46; valD = valD46; colR = colR46; rowR = rowR46; colD = colD46; rowD = rowD46; } else if ( blockIdx.x%matrices==18 ) { valR = valR45; valD = valD45; colR = colR45; rowR = rowR45; colD = colD45; rowD = rowD45; } else if ( blockIdx.x%matrices==19 ) { valR = valR44; valD = valD44; colR = colR44; rowR = rowR44; colD = colD44; rowD = rowD44; } else if ( blockIdx.x%matrices==20 ) { valR = valR43; valD = valD43; colR = colR43; rowR = rowR43; colD = colD43; rowD = rowD43; } else if ( blockIdx.x%matrices==21 ) { valR = valR42; valD = valD42; colR = colR42; rowR = rowR42; colD = colD42; rowD = rowD42; } else if ( blockIdx.x%matrices==22 ) { valR = valR41; valD = valD41; colR = colR41; rowR = rowR41; colD = colD41; rowD = rowD41; } else if ( blockIdx.x%matrices==23 ) { valR = valR40; valD = valD40; colR = colR40; rowR = rowR40; colD = colD40; rowD = rowD40; } else if ( blockIdx.x%matrices==24 ) { valR = valR39; valD = valD39; colR = colR39; rowR = rowR39; colD = colD39; rowD = rowD39; } else if ( blockIdx.x%matrices==25 ) { valR = valR38; valD = valD38; colR = colR38; rowR = rowR38; colD = colD38; rowD = rowD38; } else if ( blockIdx.x%matrices==26 ) { valR = valR37; valD = valD37; colR = colR37; rowR = rowR37; colD = colD37; rowD = rowD37; } else if ( blockIdx.x%matrices==27 ) { valR = valR36; valD = valD36; colR = colR36; rowR = rowR36; colD = colD36; rowD = rowD36; } else if ( blockIdx.x%matrices==28 ) { valR = valR35; valD = valD35; colR = colR35; rowR = rowR35; colD = colD35; rowD = rowD35; } else if ( blockIdx.x%matrices==29 ) { valR = valR34; valD = valD34; colR = colR34; rowR = rowR34; colD = colD34; rowD = rowD34; } else if ( blockIdx.x%matrices==30 ) { valR = valR33; valD = valD33; colR = colR33; rowR = rowR33; colD = colD33; rowD = rowD33; } else if ( blockIdx.x%matrices==31 ) { valR = valR32; valD = valD32; colR = colR32; rowR = rowR32; colD = colD32; rowD = rowD32; } else if ( blockIdx.x%matrices==32 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==33 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==34 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==35 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==36 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==37 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else 
if ( blockIdx.x%matrices==38 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==39 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==40 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==41 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==42 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==43 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==44 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==45 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==46 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==47 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==48 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==49 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==50 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==51 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==52 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==53 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==54 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==55 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==56 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==57 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==58 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==59 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==60 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==61 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==62 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==63 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll 
for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration with directed restricted additive Schwarz overlap (top-down) performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. Arguments --------- @param[in] localiters magma_int_t number of local Jacobi-like updates @param[in] matrices magma_int_t number of sub-matrices @param[in] overlap magma_int_t size of the overlap @param[in] D magma_s_matrix* set of matrices with diagonal blocks @param[in] R magma_s_matrix* set of matrices with non-diagonal parts @param[in] b magma_s_matrix RHS @param[in] x magma_s_matrix* iterate/solution @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sbajac_csr_overlap( magma_int_t localiters, magma_int_t matrices, magma_int_t overlap, magma_s_matrix *D, magma_s_matrix *R, magma_s_matrix b, magma_s_matrix *x, magma_queue_t queue ) { int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int size = D[0].num_rows; int min_nnz=100; for(int i=0; i<matrices; i++){ min_nnz = min(min_nnz, R[i].nnz); } if ( min_nnz > -1 ){ if ( matrices == 1 ){ int dimgrid1 = magma_ceildiv( size , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_sbajac_csr_o_ls_kernel1), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, b.dval, x->dval ); } else if (matrices == 2) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_sbajac_csr_o_ls_kernel2), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, b.dval, x->dval ); //magma_sbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 4){ int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_sbajac_csr_o_ls_kernel4), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, 
D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, b.dval, x->dval ); //magma_sbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 8) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_sbajac_csr_o_ls_kernel8), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, D[4].dval, D[4].drow, D[4].dcol, R[4].dval, R[4].drow, R[4].dcol, D[5].dval, D[5].drow, D[5].dcol, R[5].dval, R[5].drow, R[5].dcol, D[6].dval, D[6].drow, D[6].dcol, R[6].dval, R[6].drow, R[6].dcol, D[7].dval, D[7].drow, D[7].dcol, R[7].dval, R[7].drow, R[7].dcol, b.dval, x->dval ); //magma_sbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 16) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_sbajac_csr_o_ls_kernel16), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, b.dval, x->dval ); } else if (matrices == 32) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_sbajac_csr_o_ls_kernel32), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 
4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, b.dval, x->dval ); } else if (matrices == 64) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_sbajac_csr_o_ls_kernel64), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, 
R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, D[32].dval, D[32].drow, D[32].dcol, R[32].dval, R[32].drow, R[32].dcol, D[33].dval, D[33].drow, D[33].dcol, R[33].dval, R[33].drow, R[33].dcol, D[34].dval, D[34].drow, D[34].dcol, R[34].dval, R[34].drow, R[34].dcol, D[35].dval, D[35].drow, D[35].dcol, R[35].dval, R[35].drow, R[35].dcol, D[36].dval, D[36].drow, D[36].dcol, R[36].dval, R[36].drow, R[36].dcol, D[37].dval, D[37].drow, D[37].dcol, R[37].dval, R[37].drow, R[37].dcol, D[38].dval, D[38].drow, D[38].dcol, R[38].dval, R[38].drow, R[38].dcol, D[39].dval, D[39].drow, D[39].dcol, R[39].dval, R[39].drow, R[39].dcol, D[40].dval, D[40].drow, D[40].dcol, R[40].dval, R[40].drow, R[40].dcol, D[41].dval, D[41].drow, D[41].dcol, R[41].dval, R[41].drow, R[41].dcol, D[42].dval, D[42].drow, D[42].dcol, R[42].dval, R[42].drow, R[42].dcol, D[43].dval, D[43].drow, D[43].dcol, R[43].dval, R[43].drow, R[43].dcol, D[44].dval, D[44].drow, D[44].dcol, R[44].dval, R[44].drow, R[44].dcol, D[45].dval, D[45].drow, D[45].dcol, R[45].dval, R[45].drow, R[45].dcol, D[46].dval, D[46].drow, D[46].dcol, R[46].dval, R[46].drow, R[46].dcol, D[47].dval, D[47].drow, D[47].dcol, R[47].dval, R[47].drow, R[47].dcol, D[48].dval, D[48].drow, D[48].dcol, R[48].dval, R[48].drow, R[48].dcol, D[49].dval, D[49].drow, D[49].dcol, R[49].dval, R[49].drow, R[49].dcol, D[50].dval, D[50].drow, D[50].dcol, R[50].dval, R[50].drow, R[50].dcol, D[51].dval, D[51].drow, D[51].dcol, R[51].dval, R[51].drow, R[51].dcol, D[52].dval, D[52].drow, D[52].dcol, R[52].dval, R[52].drow, R[52].dcol, D[53].dval, D[53].drow, D[53].dcol, R[53].dval, R[53].drow, R[53].dcol, D[54].dval, D[54].drow, D[54].dcol, R[54].dval, R[54].drow, R[54].dcol, D[55].dval, D[55].drow, D[55].dcol, R[55].dval, R[55].drow, R[55].dcol, D[56].dval, D[56].drow, D[56].dcol, R[56].dval, R[56].drow, R[56].dcol, D[57].dval, D[57].drow, D[57].dcol, R[57].dval, R[57].drow, R[57].dcol, D[58].dval, D[58].drow, D[58].dcol, R[58].dval, R[58].drow, R[58].dcol, D[59].dval, D[59].drow, D[59].dcol, R[59].dval, R[59].drow, R[59].dcol, D[60].dval, D[60].drow, D[60].dcol, R[60].dval, R[60].drow, R[60].dcol, D[61].dval, D[61].drow, D[61].dcol, R[61].dval, R[61].drow, R[61].dcol, D[62].dval, D[62].drow, D[62].dcol, R[62].dval, R[62].drow, R[62].dcol, D[63].dval, D[63].drow, D[63].dcol, R[63].dval, R[63].drow, R[63].dcol, b.dval, x->dval ); //magma_sbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, 
b.dval, x->dval ); } else { printf("error: invalid matrix count.\n"); } } else { printf("error: all elements in diagonal block.\n"); } return MAGMA_SUCCESS; }
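The dispatch code above sizes its grid so that consecutive thread blocks overlap by `overlap` rows: each block of `blocksize1` threads only advances the row index by `blocksize1 - overlap`, which is why the row count is inflated by `blocksize1/(blocksize1-overlap)` before the ceiling division. The host-side sketch below only illustrates that computation; `overlapped_grid_dim` is a hypothetical helper (not a MAGMA API) and the example numbers are made up.

// Hypothetical helper reproducing the grid sizing used by the
// magma_sbajac_csr_overlap dispatch above. With `overlap` rows shared between
// neighbouring blocks, each block of `blocksize` threads covers only
// (blocksize - overlap) new rows, so the row count is inflated accordingly
// before the ceiling division.
#include <cstdio>

static int overlapped_grid_dim(int num_rows, int blocksize, int overlap)
{
    // mirrors: magma_ceildiv( size * blocksize1/(blocksize1-overlap), blocksize1 )
    int inflated = num_rows * blocksize / (blocksize - overlap);
    return (inflated + blocksize - 1) / blocksize;  // ceiling division
}

int main()
{
    // e.g. 10000 rows, 256-thread blocks, 32 overlapped rows per block
    printf("grid.x = %d\n", overlapped_grid_dim(10000, 256, 32));
    return 0;
}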
3441bde9a7e9dc86ef12f485c300276e730761a3.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zbajac_csr_overlap.cu, normal z -> s, Wed Jan 2 14:18:53 2019 */ #include "magmasparse_internal.h" #define PRECISION_s #define BLOCKSIZE 256 __global__ void magma_sk_testLocking(unsigned int* locks, int n) { int id = threadIdx.x % n; bool leaveLoop = false; while (!leaveLoop) { if (atomicExch(&(locks[id]), 1u) == 0u) { //critical section leaveLoop = true; atomicExch(&(locks[id]),0u); } } } /* __global__ void magma_sbajac_csr_o_ls_kernel(int localiters, int n, int matrices, int overlap, magma_s_matrix *D, magma_s_matrix *R, const float * __restrict__ b, float * x ) { // int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; // int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; __shared__ float local_x[ BLOCKSIZE ]; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; float *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; //valR = R[ (1+blockIdx.x-1)%matrices ].dval; //colR = R[ (1+blockIdx.x-1)%matrices ].dcol; //rowR = R[ (1+blockIdx.x-1)%matrices ].drow; //valD = D[ (1+blockIdx.x-1)%matrices ].dval; //colD = D[ (1+blockIdx.x-1)%matrices ].dcol; //rowD = D[ (1+blockIdx.x-1)%matrices ].drow; if (blockIdx.x%2 == 1) { valR = R[0].dval; valD = D[0].dval; colR = R[0].dcol; rowR = R[0].drow; colD = D[0].dcol; rowD = D[0].drow; } else { valR = R[1].dval; valD = D[1].dval; colR = R[1].dcol; rowR = R[1].drow; colD = D[1].dcol; rowD = D[1].drow; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; printf("bdx:%d idx:%d start:%d end:%d\n", blockIdx.x, threadIdx.x, start, end); #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for (i = start; i < end; i++) v += valR[i] * x[ colR[i] ]; start = rowD[index]; end = rowD[index+1]; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations local_x[threadIdx.x] = x[index]; //+ ( v - tmp); // / (valD[start]); __syncthreads(); #pragma unroll for (j = 0; j < localiters-1; j++) { tmp = zero; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if (threadIdx.x > overlap) { // RAS x[index] = local_x[threadIdx.x]; } } } */ __global__ void magma_sbajac_csr_o_ls_kernel1(int localiters, int n, int matrices, int overlap, float * valD, magma_index_t * rowD, magma_index_t * colD, float * valR, magma_index_t * rowR, magma_index_t * colR, const float * __restrict__ b, float * x ) { int inddiag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; //bool leaveLoop = false; if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ float local_x[ BLOCKSIZE ]; 
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_sbajac_csr_o_ls_kernel2(int localiters, int n, int matrices, int overlap, float * valD0, magma_index_t * rowD0, magma_index_t * colD0, float * valR0, magma_index_t * rowR0, magma_index_t * colR0, float * valD1, magma_index_t * rowD1, magma_index_t * colD1, float * valR1, magma_index_t * rowR1, magma_index_t * colR1, const float * __restrict__ b, float * x ) { int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; //bool leaveLoop = false; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; float *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if (blockIdx.x%matrices == 0) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if (blockIdx.x%matrices == 1) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_sbajac_csr_o_ls_kernel4(int localiters, int n, int matrices, int overlap, float * valD0, magma_index_t * rowD0, magma_index_t * colD0, float * valR0, magma_index_t * rowR0, magma_index_t * colR0, float * valD1, magma_index_t * rowD1, magma_index_t * colD1, float * valR1, magma_index_t * rowR1, magma_index_t * colR1, float * valD2, magma_index_t * rowD2, magma_index_t * colD2, float * valR2, magma_index_t * rowR2, magma_index_t * colR2, float * valD3, magma_index_t * rowD3, magma_index_t * colD3, float * valR3, magma_index_t * rowR3, magma_index_t * colR3, const float * __restrict__ b, float * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; //bool leaveLoop = false; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; float *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==1 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==2 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==3 ) { valR = valR0; valD = valD0; 
colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_sbajac_csr_o_ls_kernel8(int localiters, int n, int matrices, int overlap, float * valD0, magma_index_t * rowD0, magma_index_t * colD0, float * valR0, magma_index_t * rowR0, magma_index_t * colR0, float * valD1, magma_index_t * rowD1, magma_index_t * colD1, float * valR1, magma_index_t * rowR1, magma_index_t * colR1, float * valD2, magma_index_t * rowD2, magma_index_t * colD2, float * valR2, magma_index_t * rowR2, magma_index_t * colR2, float * valD3, magma_index_t * rowD3, magma_index_t * colD3, float * valR3, magma_index_t * rowR3, magma_index_t * colR3, float * valD4, magma_index_t * rowD4, magma_index_t * colD4, float * valR4, magma_index_t * rowR4, magma_index_t * colR4, float * valD5, magma_index_t * rowD5, magma_index_t * colD5, float * valR5, magma_index_t * rowR5, magma_index_t * colR5, float * valD6, magma_index_t * rowD6, magma_index_t * colD6, float * valR6, magma_index_t * rowR6, magma_index_t * colR6, float * valD7, magma_index_t * rowD7, magma_index_t * colD7, float * valR7, magma_index_t * rowR7, magma_index_t * colR7, const float * __restrict__ b, float * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; float *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if( blockIdx.x%matrices==0 ){ valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }else if ( blockIdx.x%matrices==1 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }else if ( blockIdx.x%matrices==2 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }else if ( blockIdx.x%matrices==3 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; }else if ( blockIdx.x%matrices==4 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==5 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==6 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==7 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( 
i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_sbajac_csr_o_ls_kernel16(int localiters, int n, int matrices, int overlap, float *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , float *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , float *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , float *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , float *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , float *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , float *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , float *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , float *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , float *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , float *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , float *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , float *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , float *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , float *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , float *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , float *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , float *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , float *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , float *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , float *valD10, magma_index_t *rowD10, magma_index_t *colD10, float *valR10, magma_index_t *rowR10, magma_index_t *colR10, float *valD11, magma_index_t *rowD11, magma_index_t *colD11, float *valR11, magma_index_t *rowR11, magma_index_t *colR11, float *valD12, magma_index_t *rowD12, magma_index_t *colD12, float *valR12, magma_index_t *rowR12, magma_index_t *colR12, float *valD13, magma_index_t *rowD13, magma_index_t *colD13, float *valR13, magma_index_t *rowR13, magma_index_t *colR13, float *valD14, magma_index_t *rowD14, magma_index_t *colD14, float *valR14, magma_index_t *rowR14, magma_index_t *colR14, float *valD15, magma_index_t *rowD15, magma_index_t *colD15, float *valR15, magma_index_t *rowR15, magma_index_t *colR15, const float * __restrict__ b, float * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; float *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==1 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==2 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==3 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( 
blockIdx.x%matrices==4 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==5 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==6 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==7 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==8 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==9 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==10 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==11 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==12 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==13 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==14 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==15 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_sbajac_csr_o_ls_kernel32(int localiters, int n, int matrices, int overlap, float *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , float *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , float *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , float *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , float *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , float *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , float *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , float *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , float *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , float *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , float *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , float *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , float *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , float *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , float *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , float *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , float *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , float *valR8 , magma_index_t *rowR8 , magma_index_t 
*colR8 , float *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , float *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , float *valD10, magma_index_t *rowD10, magma_index_t *colD10, float *valR10, magma_index_t *rowR10, magma_index_t *colR10, float *valD11, magma_index_t *rowD11, magma_index_t *colD11, float *valR11, magma_index_t *rowR11, magma_index_t *colR11, float *valD12, magma_index_t *rowD12, magma_index_t *colD12, float *valR12, magma_index_t *rowR12, magma_index_t *colR12, float *valD13, magma_index_t *rowD13, magma_index_t *colD13, float *valR13, magma_index_t *rowR13, magma_index_t *colR13, float *valD14, magma_index_t *rowD14, magma_index_t *colD14, float *valR14, magma_index_t *rowR14, magma_index_t *colR14, float *valD15, magma_index_t *rowD15, magma_index_t *colD15, float *valR15, magma_index_t *rowR15, magma_index_t *colR15, float *valD16, magma_index_t *rowD16, magma_index_t *colD16, float *valR16, magma_index_t *rowR16, magma_index_t *colR16, float *valD17, magma_index_t *rowD17, magma_index_t *colD17, float *valR17, magma_index_t *rowR17, magma_index_t *colR17, float *valD18, magma_index_t *rowD18, magma_index_t *colD18, float *valR18, magma_index_t *rowR18, magma_index_t *colR18, float *valD19, magma_index_t *rowD19, magma_index_t *colD19, float *valR19, magma_index_t *rowR19, magma_index_t *colR19, float *valD20, magma_index_t *rowD20, magma_index_t *colD20, float *valR20, magma_index_t *rowR20, magma_index_t *colR20, float *valD21, magma_index_t *rowD21, magma_index_t *colD21, float *valR21, magma_index_t *rowR21, magma_index_t *colR21, float *valD22, magma_index_t *rowD22, magma_index_t *colD22, float *valR22, magma_index_t *rowR22, magma_index_t *colR22, float *valD23, magma_index_t *rowD23, magma_index_t *colD23, float *valR23, magma_index_t *rowR23, magma_index_t *colR23, float *valD24, magma_index_t *rowD24, magma_index_t *colD24, float *valR24, magma_index_t *rowR24, magma_index_t *colR24, float *valD25, magma_index_t *rowD25, magma_index_t *colD25, float *valR25, magma_index_t *rowR25, magma_index_t *colR25, float *valD26, magma_index_t *rowD26, magma_index_t *colD26, float *valR26, magma_index_t *rowR26, magma_index_t *colR26, float *valD27, magma_index_t *rowD27, magma_index_t *colD27, float *valR27, magma_index_t *rowR27, magma_index_t *colR27, float *valD28, magma_index_t *rowD28, magma_index_t *colD28, float *valR28, magma_index_t *rowR28, magma_index_t *colR28, float *valD29, magma_index_t *rowD29, magma_index_t *colD29, float *valR29, magma_index_t *rowR29, magma_index_t *colR29, float *valD30, magma_index_t *rowD30, magma_index_t *colD30, float *valR30, magma_index_t *rowR30, magma_index_t *colR30, float *valD31, magma_index_t *rowD31, magma_index_t *colD31, float *valR31, magma_index_t *rowR31, magma_index_t *colR31, const float * __restrict__ b, float * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; float *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==1 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==2 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( 
blockIdx.x%matrices==3 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==4 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==5 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==6 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==7 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==8 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==9 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==10 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==11 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==12 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==13 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==14 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==15 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==16 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==17 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==18 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==19 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==20 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==21 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==22 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==23 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==24 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==25 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==26 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==27 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==28 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==29 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==30 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; 
rowD = rowD1; } else if ( blockIdx.x%matrices==31 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_sbajac_csr_o_ls_kernel64(int localiters, int n, int matrices, int overlap, float *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , float *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , float *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , float *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , float *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , float *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , float *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , float *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , float *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , float *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , float *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , float *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , float *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , float *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , float *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , float *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , float *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , float *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , float *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , float *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , float *valD10, magma_index_t *rowD10, magma_index_t *colD10, float *valR10, magma_index_t *rowR10, magma_index_t *colR10, float *valD11, magma_index_t *rowD11, magma_index_t *colD11, float *valR11, magma_index_t *rowR11, magma_index_t *colR11, float *valD12, magma_index_t *rowD12, magma_index_t *colD12, float *valR12, magma_index_t *rowR12, magma_index_t *colR12, float *valD13, magma_index_t *rowD13, magma_index_t *colD13, float *valR13, magma_index_t *rowR13, magma_index_t *colR13, float *valD14, magma_index_t *rowD14, magma_index_t *colD14, float *valR14, magma_index_t *rowR14, magma_index_t *colR14, float *valD15, magma_index_t *rowD15, magma_index_t *colD15, float *valR15, magma_index_t *rowR15, magma_index_t *colR15, float *valD16, magma_index_t *rowD16, magma_index_t *colD16, float *valR16, magma_index_t *rowR16, magma_index_t *colR16, float *valD17, magma_index_t *rowD17, magma_index_t *colD17, float *valR17, magma_index_t *rowR17, magma_index_t *colR17, float *valD18, magma_index_t *rowD18, magma_index_t *colD18, float *valR18, magma_index_t *rowR18, magma_index_t *colR18, float *valD19, magma_index_t *rowD19, magma_index_t *colD19, float *valR19, magma_index_t *rowR19, magma_index_t 
*colR19, float *valD20, magma_index_t *rowD20, magma_index_t *colD20, float *valR20, magma_index_t *rowR20, magma_index_t *colR20, float *valD21, magma_index_t *rowD21, magma_index_t *colD21, float *valR21, magma_index_t *rowR21, magma_index_t *colR21, float *valD22, magma_index_t *rowD22, magma_index_t *colD22, float *valR22, magma_index_t *rowR22, magma_index_t *colR22, float *valD23, magma_index_t *rowD23, magma_index_t *colD23, float *valR23, magma_index_t *rowR23, magma_index_t *colR23, float *valD24, magma_index_t *rowD24, magma_index_t *colD24, float *valR24, magma_index_t *rowR24, magma_index_t *colR24, float *valD25, magma_index_t *rowD25, magma_index_t *colD25, float *valR25, magma_index_t *rowR25, magma_index_t *colR25, float *valD26, magma_index_t *rowD26, magma_index_t *colD26, float *valR26, magma_index_t *rowR26, magma_index_t *colR26, float *valD27, magma_index_t *rowD27, magma_index_t *colD27, float *valR27, magma_index_t *rowR27, magma_index_t *colR27, float *valD28, magma_index_t *rowD28, magma_index_t *colD28, float *valR28, magma_index_t *rowR28, magma_index_t *colR28, float *valD29, magma_index_t *rowD29, magma_index_t *colD29, float *valR29, magma_index_t *rowR29, magma_index_t *colR29, float *valD30, magma_index_t *rowD30, magma_index_t *colD30, float *valR30, magma_index_t *rowR30, magma_index_t *colR30, float *valD31, magma_index_t *rowD31, magma_index_t *colD31, float *valR31, magma_index_t *rowR31, magma_index_t *colR31, float *valD32, magma_index_t *rowD32, magma_index_t *colD32, float *valR32, magma_index_t *rowR32, magma_index_t *colR32, float *valD33, magma_index_t *rowD33, magma_index_t *colD33, float *valR33, magma_index_t *rowR33, magma_index_t *colR33, float *valD34, magma_index_t *rowD34, magma_index_t *colD34, float *valR34, magma_index_t *rowR34, magma_index_t *colR34, float *valD35, magma_index_t *rowD35, magma_index_t *colD35, float *valR35, magma_index_t *rowR35, magma_index_t *colR35, float *valD36, magma_index_t *rowD36, magma_index_t *colD36, float *valR36, magma_index_t *rowR36, magma_index_t *colR36, float *valD37, magma_index_t *rowD37, magma_index_t *colD37, float *valR37, magma_index_t *rowR37, magma_index_t *colR37, float *valD38, magma_index_t *rowD38, magma_index_t *colD38, float *valR38, magma_index_t *rowR38, magma_index_t *colR38, float *valD39, magma_index_t *rowD39, magma_index_t *colD39, float *valR39, magma_index_t *rowR39, magma_index_t *colR39, float *valD40, magma_index_t *rowD40, magma_index_t *colD40, float *valR40, magma_index_t *rowR40, magma_index_t *colR40, float *valD41, magma_index_t *rowD41, magma_index_t *colD41, float *valR41, magma_index_t *rowR41, magma_index_t *colR41, float *valD42, magma_index_t *rowD42, magma_index_t *colD42, float *valR42, magma_index_t *rowR42, magma_index_t *colR42, float *valD43, magma_index_t *rowD43, magma_index_t *colD43, float *valR43, magma_index_t *rowR43, magma_index_t *colR43, float *valD44, magma_index_t *rowD44, magma_index_t *colD44, float *valR44, magma_index_t *rowR44, magma_index_t *colR44, float *valD45, magma_index_t *rowD45, magma_index_t *colD45, float *valR45, magma_index_t *rowR45, magma_index_t *colR45, float *valD46, magma_index_t *rowD46, magma_index_t *colD46, float *valR46, magma_index_t *rowR46, magma_index_t *colR46, float *valD47, magma_index_t *rowD47, magma_index_t *colD47, float *valR47, magma_index_t *rowR47, magma_index_t *colR47, float *valD48, magma_index_t *rowD48, magma_index_t *colD48, float *valR48, magma_index_t *rowR48, magma_index_t *colR48, float 
*valD49, magma_index_t *rowD49, magma_index_t *colD49, float *valR49, magma_index_t *rowR49, magma_index_t *colR49, float *valD50, magma_index_t *rowD50, magma_index_t *colD50, float *valR50, magma_index_t *rowR50, magma_index_t *colR50, float *valD51, magma_index_t *rowD51, magma_index_t *colD51, float *valR51, magma_index_t *rowR51, magma_index_t *colR51, float *valD52, magma_index_t *rowD52, magma_index_t *colD52, float *valR52, magma_index_t *rowR52, magma_index_t *colR52, float *valD53, magma_index_t *rowD53, magma_index_t *colD53, float *valR53, magma_index_t *rowR53, magma_index_t *colR53, float *valD54, magma_index_t *rowD54, magma_index_t *colD54, float *valR54, magma_index_t *rowR54, magma_index_t *colR54, float *valD55, magma_index_t *rowD55, magma_index_t *colD55, float *valR55, magma_index_t *rowR55, magma_index_t *colR55, float *valD56, magma_index_t *rowD56, magma_index_t *colD56, float *valR56, magma_index_t *rowR56, magma_index_t *colR56, float *valD57, magma_index_t *rowD57, magma_index_t *colD57, float *valR57, magma_index_t *rowR57, magma_index_t *colR57, float *valD58, magma_index_t *rowD58, magma_index_t *colD58, float *valR58, magma_index_t *rowR58, magma_index_t *colR58, float *valD59, magma_index_t *rowD59, magma_index_t *colD59, float *valR59, magma_index_t *rowR59, magma_index_t *colR59, float *valD60, magma_index_t *rowD60, magma_index_t *colD60, float *valR60, magma_index_t *rowR60, magma_index_t *colR60, float *valD61, magma_index_t *rowD61, magma_index_t *colD61, float *valR61, magma_index_t *rowR61, magma_index_t *colR61, float *valD62, magma_index_t *rowD62, magma_index_t *colD62, float *valR62, magma_index_t *rowR62, magma_index_t *colR62, float *valD63, magma_index_t *rowD63, magma_index_t *colD63, float *valR63, magma_index_t *rowR63, magma_index_t *colR63, const float * __restrict__ b, float * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; float *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR63; valD = valD63; colR = colR63; rowR = rowR63; colD = colD63; rowD = rowD63; } else if ( blockIdx.x%matrices==1 ) { valR = valR62; valD = valD62; colR = colR62; rowR = rowR62; colD = colD62; rowD = rowD62; } else if ( blockIdx.x%matrices==2 ) { valR = valR61; valD = valD61; colR = colR61; rowR = rowR61; colD = colD61; rowD = rowD61; } else if ( blockIdx.x%matrices==3 ) { valR = valR60; valD = valD60; colR = colR60; rowR = rowR60; colD = colD60; rowD = rowD60; } else if ( blockIdx.x%matrices==4 ) { valR = valR59; valD = valD59; colR = colR59; rowR = rowR59; colD = colD59; rowD = rowD59; } else if ( blockIdx.x%matrices==5 ) { valR = valR58; valD = valD58; colR = colR58; rowR = rowR58; colD = colD58; rowD = rowD58; } else if ( blockIdx.x%matrices==6 ) { valR = valR57; valD = valD57; colR = colR57; rowR = rowR57; colD = colD57; rowD = rowD57; } else if ( blockIdx.x%matrices==7 ) { valR = valR56; valD = valD56; colR = colR56; rowR = rowR56; colD = colD56; rowD = rowD56; } else if ( blockIdx.x%matrices==8 ) { valR = valR55; valD = valD55; colR = colR55; rowR = rowR55; colD = colD55; rowD = rowD55; } else if ( blockIdx.x%matrices==9 ) { valR = valR54; valD = valD54; colR = colR54; rowR = rowR54; colD = colD54; rowD = rowD54; } else if ( blockIdx.x%matrices==10 ) { valR = valR53; valD = valD53; colR = colR53; rowR = rowR53; colD = colD53; rowD 
= rowD53; } else if ( blockIdx.x%matrices==11 ) { valR = valR52; valD = valD52; colR = colR52; rowR = rowR52; colD = colD52; rowD = rowD52; } else if ( blockIdx.x%matrices==12 ) { valR = valR51; valD = valD51; colR = colR51; rowR = rowR51; colD = colD51; rowD = rowD51; } else if ( blockIdx.x%matrices==13 ) { valR = valR50; valD = valD50; colR = colR50; rowR = rowR50; colD = colD50; rowD = rowD50; } else if ( blockIdx.x%matrices==14 ) { valR = valR49; valD = valD49; colR = colR49; rowR = rowR49; colD = colD49; rowD = rowD49; } else if ( blockIdx.x%matrices==15 ) { valR = valR48; valD = valD48; colR = colR48; rowR = rowR48; colD = colD48; rowD = rowD48; } else if ( blockIdx.x%matrices==16 ) { valR = valR47; valD = valD47; colR = colR47; rowR = rowR47; colD = colD47; rowD = rowD47; } else if ( blockIdx.x%matrices==17 ) { valR = valR46; valD = valD46; colR = colR46; rowR = rowR46; colD = colD46; rowD = rowD46; } else if ( blockIdx.x%matrices==18 ) { valR = valR45; valD = valD45; colR = colR45; rowR = rowR45; colD = colD45; rowD = rowD45; } else if ( blockIdx.x%matrices==19 ) { valR = valR44; valD = valD44; colR = colR44; rowR = rowR44; colD = colD44; rowD = rowD44; } else if ( blockIdx.x%matrices==20 ) { valR = valR43; valD = valD43; colR = colR43; rowR = rowR43; colD = colD43; rowD = rowD43; } else if ( blockIdx.x%matrices==21 ) { valR = valR42; valD = valD42; colR = colR42; rowR = rowR42; colD = colD42; rowD = rowD42; } else if ( blockIdx.x%matrices==22 ) { valR = valR41; valD = valD41; colR = colR41; rowR = rowR41; colD = colD41; rowD = rowD41; } else if ( blockIdx.x%matrices==23 ) { valR = valR40; valD = valD40; colR = colR40; rowR = rowR40; colD = colD40; rowD = rowD40; } else if ( blockIdx.x%matrices==24 ) { valR = valR39; valD = valD39; colR = colR39; rowR = rowR39; colD = colD39; rowD = rowD39; } else if ( blockIdx.x%matrices==25 ) { valR = valR38; valD = valD38; colR = colR38; rowR = rowR38; colD = colD38; rowD = rowD38; } else if ( blockIdx.x%matrices==26 ) { valR = valR37; valD = valD37; colR = colR37; rowR = rowR37; colD = colD37; rowD = rowD37; } else if ( blockIdx.x%matrices==27 ) { valR = valR36; valD = valD36; colR = colR36; rowR = rowR36; colD = colD36; rowD = rowD36; } else if ( blockIdx.x%matrices==28 ) { valR = valR35; valD = valD35; colR = colR35; rowR = rowR35; colD = colD35; rowD = rowD35; } else if ( blockIdx.x%matrices==29 ) { valR = valR34; valD = valD34; colR = colR34; rowR = rowR34; colD = colD34; rowD = rowD34; } else if ( blockIdx.x%matrices==30 ) { valR = valR33; valD = valD33; colR = colR33; rowR = rowR33; colD = colD33; rowD = rowD33; } else if ( blockIdx.x%matrices==31 ) { valR = valR32; valD = valD32; colR = colR32; rowR = rowR32; colD = colD32; rowD = rowD32; } else if ( blockIdx.x%matrices==32 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==33 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==34 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==35 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==36 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==37 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( 
blockIdx.x%matrices==38 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==39 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==40 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==41 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==42 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==43 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==44 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==45 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==46 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==47 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==48 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==49 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==50 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==51 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==52 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==53 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==54 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==55 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==56 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==57 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==58 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==59 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==60 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==61 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==62 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==63 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( 
i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration with directed restricted additive Schwarz overlap (top-down) performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. Arguments --------- @param[in] localiters magma_int_t number of local Jacobi-like updates @param[in] matrices magma_int_t number of sub-matrices @param[in] overlap magma_int_t size of the overlap @param[in] D magma_s_matrix* set of matrices with diagonal blocks @param[in] R magma_s_matrix* set of matrices with non-diagonal parts @param[in] b magma_s_matrix RHS @param[in] x magma_s_matrix* iterate/solution @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sbajac_csr_overlap( magma_int_t localiters, magma_int_t matrices, magma_int_t overlap, magma_s_matrix *D, magma_s_matrix *R, magma_s_matrix b, magma_s_matrix *x, magma_queue_t queue ) { int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int size = D[0].num_rows; int min_nnz=100; for(int i=0; i<matrices; i++){ min_nnz = min(min_nnz, R[i].nnz); } if ( min_nnz > -1 ){ if ( matrices == 1 ){ int dimgrid1 = magma_ceildiv( size , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_sbajac_csr_o_ls_kernel1<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, b.dval, x->dval ); } else if (matrices == 2) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_sbajac_csr_o_ls_kernel2<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, b.dval, x->dval ); //magma_sbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 4){ int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_sbajac_csr_o_ls_kernel4<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, b.dval, x->dval ); 
//magma_sbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 8) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_sbajac_csr_o_ls_kernel8<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, D[4].dval, D[4].drow, D[4].dcol, R[4].dval, R[4].drow, R[4].dcol, D[5].dval, D[5].drow, D[5].dcol, R[5].dval, R[5].drow, R[5].dcol, D[6].dval, D[6].drow, D[6].dcol, R[6].dval, R[6].drow, R[6].dcol, D[7].dval, D[7].drow, D[7].dcol, R[7].dval, R[7].drow, R[7].dcol, b.dval, x->dval ); //magma_sbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 16) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_sbajac_csr_o_ls_kernel16<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, b.dval, x->dval ); } else if (matrices == 32) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_sbajac_csr_o_ls_kernel32<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 
7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, b.dval, x->dval ); } else if (matrices == 64) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_sbajac_csr_o_ls_kernel64<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, 
D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, D[32].dval, D[32].drow, D[32].dcol, R[32].dval, R[32].drow, R[32].dcol, D[33].dval, D[33].drow, D[33].dcol, R[33].dval, R[33].drow, R[33].dcol, D[34].dval, D[34].drow, D[34].dcol, R[34].dval, R[34].drow, R[34].dcol, D[35].dval, D[35].drow, D[35].dcol, R[35].dval, R[35].drow, R[35].dcol, D[36].dval, D[36].drow, D[36].dcol, R[36].dval, R[36].drow, R[36].dcol, D[37].dval, D[37].drow, D[37].dcol, R[37].dval, R[37].drow, R[37].dcol, D[38].dval, D[38].drow, D[38].dcol, R[38].dval, R[38].drow, R[38].dcol, D[39].dval, D[39].drow, D[39].dcol, R[39].dval, R[39].drow, R[39].dcol, D[40].dval, D[40].drow, D[40].dcol, R[40].dval, R[40].drow, R[40].dcol, D[41].dval, D[41].drow, D[41].dcol, R[41].dval, R[41].drow, R[41].dcol, D[42].dval, D[42].drow, D[42].dcol, R[42].dval, R[42].drow, R[42].dcol, D[43].dval, D[43].drow, D[43].dcol, R[43].dval, R[43].drow, R[43].dcol, D[44].dval, D[44].drow, D[44].dcol, R[44].dval, R[44].drow, R[44].dcol, D[45].dval, D[45].drow, D[45].dcol, R[45].dval, R[45].drow, R[45].dcol, D[46].dval, D[46].drow, D[46].dcol, R[46].dval, R[46].drow, R[46].dcol, D[47].dval, D[47].drow, D[47].dcol, R[47].dval, R[47].drow, R[47].dcol, D[48].dval, D[48].drow, D[48].dcol, R[48].dval, R[48].drow, R[48].dcol, D[49].dval, D[49].drow, D[49].dcol, R[49].dval, R[49].drow, R[49].dcol, D[50].dval, D[50].drow, D[50].dcol, R[50].dval, R[50].drow, R[50].dcol, D[51].dval, D[51].drow, D[51].dcol, R[51].dval, R[51].drow, R[51].dcol, D[52].dval, D[52].drow, D[52].dcol, R[52].dval, R[52].drow, R[52].dcol, D[53].dval, D[53].drow, D[53].dcol, R[53].dval, R[53].drow, R[53].dcol, D[54].dval, D[54].drow, D[54].dcol, R[54].dval, R[54].drow, R[54].dcol, D[55].dval, D[55].drow, D[55].dcol, R[55].dval, R[55].drow, R[55].dcol, D[56].dval, D[56].drow, D[56].dcol, R[56].dval, R[56].drow, R[56].dcol, D[57].dval, D[57].drow, D[57].dcol, R[57].dval, R[57].drow, R[57].dcol, D[58].dval, D[58].drow, D[58].dcol, R[58].dval, R[58].drow, R[58].dcol, D[59].dval, D[59].drow, D[59].dcol, R[59].dval, R[59].drow, R[59].dcol, D[60].dval, D[60].drow, D[60].dcol, R[60].dval, R[60].drow, R[60].dcol, D[61].dval, D[61].drow, D[61].dcol, R[61].dval, R[61].drow, R[61].dcol, D[62].dval, D[62].drow, D[62].dcol, R[62].dval, R[62].drow, R[62].dcol, D[63].dval, D[63].drow, D[63].dcol, R[63].dval, R[63].drow, R[63].dcol, b.dval, x->dval ); //magma_sbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else { printf("error: invalid matrix count.\n"); } } else { printf("error: all elements in diagonal block.\n"); } return MAGMA_SUCCESS; }
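The Purpose section above describes the block-asynchronous Jacobi sweep with restricted additive Schwarz overlap that every magma_sbajac_csr_o_ls_kernelN variant implements; the variants differ only in how many (D, R) CSR pairs they take and in the blockIdx.x % matrices selection of the pair. The kernel below is a simplified, self-contained sketch of that shared per-block update for a single (D, R) pair, using the overlapped index mapping of the larger variants. The kernel name and macro are hypothetical, magma_index_t is replaced by plain int, and — as in the originals — the diagonal coefficient is assumed to be stored first in each row of D and the barrier assumes whole thread blocks fall inside [0, n).

#define SKETCH_BLOCKSIZE 256

__global__ void bajac_csr_block_sketch(
    int localiters, int n, int overlap,
    const float *valD, const int *rowD, const int *colD,   // diagonal block, CSR
    const float *valR, const int *rowR, const int *colR,   // off-diagonal part, CSR
    const float * __restrict__ b, float *x)
{
    int inddiag = blockIdx.x * (blockDim.x - overlap) - overlap;  // first local row of this block
    int index   = inddiag + threadIdx.x;                          // global row handled by this thread
    __shared__ float local_x[SKETCH_BLOCKSIZE];

    if (index > -1 && index < n) {
        // v = b - R*x : residual contribution of rows outside the diagonal block
        float v = 0.0f;
        for (int i = rowR[index]; i < rowR[index + 1]; i++)
            v += valR[i] * x[colR[i]];
        v = b[index] - v;

        // first damped Jacobi step against the diagonal block D
        int start = rowD[index], end = rowD[index + 1];
        float tmp = 0.0f;
        for (int i = start; i < end; i++)
            tmp += valD[i] * x[colD[i]];
        local_x[threadIdx.x] = x[index] + (v - tmp) / valD[start];  // valD[start]: diagonal entry
        __syncthreads();

        // remaining local sweeps operate purely on the shared-memory copy
        for (int j = 0; j < localiters - 1; j++) {
            tmp = 0.0f;
            for (int i = start; i < end; i++)
                tmp += valD[i] * local_x[colD[i] - inddiag];
            local_x[threadIdx.x] += (v - tmp) / valD[start];
        }

        // restricted additive Schwarz: only the non-overlapped (lower) rows write back
        if (threadIdx.x >= overlap)
            x[index] = local_x[threadIdx.x];
    }
}

A caller would launch this with the overlapped grid sizing used by the host dispatch above, e.g. bajac_csr_block_sketch<<< grid, SKETCH_BLOCKSIZE >>>( localiters, n, overlap, ... ), after splitting the matrix into the D/R CSR parts expected by the routine.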
b68572eb81e56a606b71173f6e43541a4937fb8e.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the spopecific language governing permissions and limitations under the License. */ #include <utility> #include <vector> #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/memory/memory.h" #ifdef PADDLE_WITH_HIP #include "paddle/fluid/operators/conv_miopen_helper.h" #else #include "paddle/fluid/operators/conv_cudnn_helper.h" #endif #include "paddle/fluid/operators/conv_op.h" #include "paddle/fluid/operators/math/padding.h" #include "paddle/fluid/platform/cudnn_workspace_helper.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/profiler.h" DECLARE_bool(cudnn_deterministic); DECLARE_uint64(conv_workspace_size_limit); DECLARE_bool(cudnn_exhaustive_search); namespace paddle { namespace operators { using Tensor = framework::Tensor; using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; using ScopedFilterDescriptor = platform::ScopedFilterDescriptor; using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor; using DataLayout = platform::DataLayout; static inline bool IsVoltaOrLater(const platform::CUDADeviceContext& dev_ctx) { return dev_ctx.GetComputeCapability() >= 70; } template <typename T> class CUDNNConvOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace.")); const Tensor* input = ctx.Input<Tensor>("Input"); auto* filter = ctx.Input<Tensor>("Filter"); auto* output = ctx.Output<Tensor>("Output"); output->mutable_data<T>(ctx.GetPlace()); const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); int groups = ctx.Attr<int>("groups"); bool exhaustive_search = FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search"); bool deterministic = FLAGS_cudnn_deterministic; auto exhaustive_deterministic = exhaustive_search && deterministic; PADDLE_ENFORCE_EQ(exhaustive_deterministic, false, platform::errors::InvalidArgument( "Cann't set exhaustive_search True and " "FLAGS_cudnn_deterministic True at same time.")); const std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm"); const std::string data_format = ctx.Attr<std::string>("data_format"); const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); auto dtype = platform::CudnnDataType<T>::type; #ifdef PADDLE_WITH_HIP // HIP MIOPEN ONLY SUPPORT NCHW format auto compute_format = DataLayout::kNCHW; #else // Tensor Core introduced from Volta GPUs supports more faster conv op // with FP16 in NHWC data format. 
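// NHWC compute is used only when all three conditions hold: FP16 data
// (CUDNN_DATA_HALF), a Volta-or-later GPU (compute capability >= 7.0), and a
// channel-last data_format; otherwise compute_format falls back to NCHW and
// channel-last inputs are transposed below.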
const bool compute_in_nhwc = dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx); // We will only do data format conversion from NHWC to NCHW. // cudnn will convert NCHW to NHWC automatically on Tensor Core. auto compute_format = compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW; #endif VLOG(3) << "Compute ConvOp with cuDNN:" << " data_format=" << data_format << " compute_format=" << (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW"); // ------------ transformed tensor ----------- Tensor transformed_input_channel(input->type()); Tensor transformed_output(output->type()); Tensor transformed_filter_channel(filter->type()); T* output_data = nullptr; if (channel_last && compute_format == DataLayout::kNCHW) { VLOG(3) << "Transform input tensor from NHWC to NCHW."; ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, input, &transformed_input_channel); TransToChannelFirst<platform::CUDADeviceContext, T>( ctx, input, &transformed_input_channel); ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, output, &transformed_output); } else { transformed_input_channel.ShareDataWith(*input); transformed_output.ShareDataWith(*output); } if (compute_format == DataLayout::kNHWC) { VLOG(3) << "Transform filter tensor from NCHW to NHWC."; ResizeToChannelLast<platform::CUDADeviceContext, T>( ctx, filter, &transformed_filter_channel); TransToChannelLast<platform::CUDADeviceContext, T>( ctx, filter, &transformed_filter_channel); } else { transformed_filter_channel.ShareDataWith(*filter); } output_data = transformed_output.data<T>(); // update padding and dilation auto in_dims = transformed_input_channel.dims(); auto filter_dims = transformed_filter_channel.dims(); framework::DDim in_data_dims; framework::DDim filter_data_dims; if (compute_format == DataLayout::kNCHW) { in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size()); filter_data_dims = framework::slice_ddim(filter_dims, 2, filter_dims.size()); } else { in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1); filter_data_dims = framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1); } std::vector<int> ksize = framework::vectorize<int>(filter_data_dims); UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim); Tensor transformed_input; std::vector<int> padding_common(data_dim, 0); if (!is_sys_pad) { std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_input_channel.dims()[0]; if (compute_format == DataLayout::kNCHW) { new_input_shape_vec[1] = transformed_input_channel.dims()[1]; } else { new_input_shape_vec[data_dim + 1] = transformed_input_channel.dims()[data_dim + 1]; } std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0); for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]); if (compute_format == DataLayout::kNCHW) { new_input_shape_vec[i + 2] = transformed_input_channel.dims()[i + 2] + padding_diff[i]; } else { new_input_shape_vec[i + 1] = transformed_input_channel.dims()[i + 1] + padding_diff[i]; } if (compute_format == DataLayout::kNCHW) { input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } else { input_pad[2 * i + 2] = paddings[2 * i] - 
padding_common[i]; input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i]; } } framework::DDim new_input_shape( framework::make_ddim(new_input_shape_vec)); transformed_input.Resize(new_input_shape); auto& dev_ctx = ctx.template device_context<paddle::platform::CUDADeviceContext>(); transformed_input = ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_input_shape, dev_ctx); const int rank = transformed_input_channel.dims().size(); T pad_value(0.0); switch (rank) { case 4: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>( ctx, input_pad, transformed_input_channel, pad_value, &transformed_input); } break; case 5: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>( ctx, input_pad, transformed_input_channel, pad_value, &transformed_input); } break; default: PADDLE_THROW(platform::errors::InvalidArgument( "ConvOp only support tensors with 4 or 5 dimensions.")); } } else { transformed_input.ShareDataWith(transformed_input_channel); if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[2 * i]; } } } const T* input_data = transformed_input.data<T>(); const T* filter_data = transformed_filter_channel.data<T>(); // ------------------- cudnn descriptors --------------------- ConvArgs args{&transformed_input, &transformed_filter_channel, &transformed_output, strides, padding_common, dilations, dtype}; auto handle = dev_ctx.cudnn_handle(); auto workspace_handle = dev_ctx.cudnn_workspace_handle(); DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW; if (transformed_input.dims().size() == 5) { layout = compute_format == DataLayout::kNHWC ? DataLayout::kNDHWC : DataLayout::kNCDHW; } auto layout_format = GetCudnnTensorFormat(layout); args.handle = handle; #ifdef PADDLE_WITH_HIP // MIOPEN need to set groups in cdesc in miopen_desc.h args.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn(), groups); #else args.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn()); #endif #if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION_MIN(7, 0, 1) // cudnn 7 can support groups, no need to do it manually // FIXME(typhoonzero): find a better way to disable groups // rather than setting it to 1. PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnSetConvolutionGroupCount(args.cdesc.desc(), groups)); groups = 1; #endif #ifdef PADDLE_WITH_HIP // MIOPEN do not set groups in wdesc after set groups in cdesc groups = 1; #endif args.idesc.set(transformed_input, layout_format); args.wdesc.set(transformed_filter_channel, layout_format, groups); args.odesc.set(transformed_output, layout_format); int i_n, i_c, i_d, i_h, i_w; int o_n, o_c, o_d, o_h, o_w; if (compute_format == DataLayout::kNHWC) { GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d, &i_h, &i_w); GetNCDHW(transformed_output.dims(), DataLayout::kNHWC, &o_n, &o_c, &o_d, &o_h, &o_w); } else { GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); GetNCDHW(transformed_output.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w); } int group_offset_in = i_c / groups * i_h * i_w * i_d; int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = transformed_filter_channel.numel() / groups; // ------------------- cudnn conv workspace --------------------- size_t workspace_size = 0; // final workspace to allocate. 
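// The two backends order the next steps differently: the MIOpen path queries
// the workspace size first and passes it into Find(), while the cuDNN path
// selects the algorithm first and then sizes the workspace for that algorithm.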
// ------------------- cudnn conv algorithm --------------------- #ifdef PADDLE_WITH_HIP miopenConvFwdAlgorithm_t algo{}; using search = SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = search::GetWorkspaceSize(args); algo = search::Find<T>(args, exhaustive_search, false, workspace_size, ctx); #else cudnnConvolutionFwdAlgo_t algo{}; using search = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; algo = search::Find<T>(args, exhaustive_search, false, ctx); workspace_size = search::GetWorkspaceSize(args, algo); #endif #if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION_MIN(7, 0, 1) // when groups > 1, SearchAlgorithm find algo is CUDNN_CONVOLUTION_\ // FWD_ALGO_WINOGRAD_NONFUSED, but this kind of algorithm is unstable // in forward computation, so change the algorithm to CUDNN_CONVOLUTION_\ // FWD_ALGO_IMPLICIT_GEMM manually. if (ctx.Attr<int>("groups") > 1) { algo = static_cast<cudnnConvolutionFwdAlgo_t>(0); } #endif // ------------------- cudnn conv forward --------------------- ScalingParamType<T> alpha = 1.0f; ScalingParamType<T> beta = 0.0f; // NOTE(zhiqiu): inplace addto is not supportted in double grad yet. // ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f; // VLOG(4) << "Conv: use_addto = " << ctx.Attr<bool>("use_addto"); #ifdef PADDLE_WITH_HIP workspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::miopenConvolutionForward( handle, &alpha, args.idesc.desc(), input_data, args.wdesc.desc(), filter_data, args.cdesc.desc(), algo, &beta, args.odesc.desc(), output_data, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { workspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionForward( handle, &alpha, args.idesc.desc(), input_data + i * group_offset_in, args.wdesc.desc(), filter_data + i * group_offset_filter, args.cdesc.desc(), algo, workspace_ptr, workspace_size, &beta, args.odesc.desc(), output_data + i * group_offset_out)); }, workspace_size); } #endif if (channel_last && compute_format == DataLayout::kNCHW) { TransToChannelLast<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_output, output); } } }; template <typename T> class CUDNNConvGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace.")); auto input = ctx.Input<Tensor>("Input"); auto filter = ctx.Input<Tensor>("Filter"); auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input")); auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter")); if (input_grad) { input_grad->mutable_data<T>(ctx.GetPlace()); } if (filter_grad) { filter_grad->mutable_data<T>(ctx.GetPlace()); } std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm"); int groups = ctx.Attr<int>("groups"); bool exhaustive_search = FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search"); bool deterministic = FLAGS_cudnn_deterministic; auto exhaustive_deterministic = exhaustive_search && 
deterministic; PADDLE_ENFORCE_EQ(exhaustive_deterministic, false, platform::errors::InvalidArgument( "Cann't set exhaustive_search True and " "FLAGS_cudnn_deterministic True at same time.")); const std::string data_format = ctx.Attr<std::string>("data_format"); const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); auto dtype = platform::CudnnDataType<T>::type; #ifdef PADDLE_WITH_HIP // HIP MIOPEN ONLY SUPPORT NCHW format auto compute_format = DataLayout::kNCHW; #else const bool compute_in_nhwc = dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx); auto compute_format = compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW; #endif VLOG(3) << "Compute ConvGradOp with cuDNN:" << " data_format=" << data_format << " compute_format=" << (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW"); // transform Tensor Tensor transformed_input_channel(input->type()); Tensor transformed_output_grad_channel(output_grad->type()); Tensor transformed_input_grad_channel(input->type()); Tensor transformed_filter_channel(filter->type()); Tensor transformed_filter_grad_channel(filter->type()); if (channel_last && compute_format == DataLayout::kNCHW) { VLOG(3) << "Transform input, output_grad, input_grad and tensor from " "NHWC to NCHW."; ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, input, &transformed_input_channel); TransToChannelFirst<platform::CUDADeviceContext, T>( ctx, input, &transformed_input_channel); ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, output_grad, &transformed_output_grad_channel); TransToChannelFirst<platform::CUDADeviceContext, T>( ctx, output_grad, &transformed_output_grad_channel); if (input_grad) { ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, input_grad, &transformed_input_grad_channel); // NOTE(zhiqiu): If inplace_addto strategy is enabled, we need to copy // the data of input_grad to transformed_input_grad_channel. 
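// When use_addto is set, backward-data runs with beta == 1 and accumulates
// into this buffer, so the existing input_grad values have to be present in
// the transformed (NCHW) copy before the convolution is launched.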
if (ctx.Attr<bool>("use_addto")) { TransToChannelFirst<platform::CUDADeviceContext, T>( ctx, input_grad, &transformed_input_grad_channel); } } } else { transformed_input_channel.ShareDataWith(*input); transformed_output_grad_channel.ShareDataWith(*output_grad); if (input_grad) { transformed_input_grad_channel.ShareDataWith(*input_grad); } } if (compute_format == DataLayout::kNHWC) { VLOG(3) << "Transform filter and filter_grad tensor from NCHW to NHWC."; ResizeToChannelLast<platform::CUDADeviceContext, T>( ctx, filter, &transformed_filter_channel); TransToChannelLast<platform::CUDADeviceContext, T>( ctx, filter, &transformed_filter_channel); if (filter_grad) { ResizeToChannelLast<platform::CUDADeviceContext, T>( ctx, filter_grad, &transformed_filter_grad_channel); } } else { transformed_filter_channel.ShareDataWith(*filter); if (filter_grad) { transformed_filter_grad_channel.ShareDataWith(*filter_grad); } } // update paddings auto in_dims = transformed_input_channel.dims(); auto filter_dims = transformed_filter_channel.dims(); framework::DDim in_data_dims; framework::DDim filter_data_dims; if (compute_format == DataLayout::kNCHW) { in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size()); filter_data_dims = framework::slice_ddim(filter_dims, 2, filter_dims.size()); } else { in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1); filter_data_dims = framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1); } std::vector<int> ksize = framework::vectorize<int>(filter_data_dims); UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); // cuDNN only supports padding the same amount on every dimension. // So we create a new padded input tensor. int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim); Tensor transformed_input(input->type()); Tensor transformed_input_grad(input->type()); std::vector<int> padding_common(data_dim, 0); std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0); if (!is_sys_pad) { // get pad std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_input_channel.dims()[0]; if (compute_format == DataLayout::kNCHW) { new_input_shape_vec[1] = transformed_input_channel.dims()[1]; } else { new_input_shape_vec[data_dim + 1] = transformed_input_channel.dims()[data_dim + 1]; } for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]); if (compute_format == DataLayout::kNCHW) { new_input_shape_vec[i + 2] = transformed_input_channel.dims()[i + 2] + padding_diff[i]; } else { new_input_shape_vec[i + 1] = transformed_input_channel.dims()[i + 1] + padding_diff[i]; } if (compute_format == DataLayout::kNCHW) { input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } else { input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i]; } } framework::DDim new_input_shape( framework::make_ddim(new_input_shape_vec)); transformed_input.Resize(new_input_shape); transformed_input_grad.Resize(new_input_shape); auto& dev_ctx = ctx.template device_context<paddle::platform::CUDADeviceContext>(); transformed_input = ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_input_shape, dev_ctx); if (input_grad) { transformed_input_grad = 
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_input_shape, dev_ctx); } // pad for input const int rank = transformed_input_channel.dims().size(); T pad_value(0.0); switch (rank) { case 4: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>( ctx, input_pad, transformed_input_channel, pad_value, &transformed_input); } break; case 5: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>( ctx, input_pad, transformed_input_channel, pad_value, &transformed_input); } break; default: PADDLE_THROW(platform::errors::InvalidArgument( "ConvOp only support tensors with 4 or 5 dimensions.")); } } else { transformed_input.ShareDataWith(transformed_input_channel); if (input_grad) { transformed_input_grad.ShareDataWith(transformed_input_grad_channel); } if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[2 * i]; } } } const T* input_data = transformed_input.data<T>(); const T* output_grad_data = transformed_output_grad_channel.data<T>(); const T* filter_data = transformed_filter_channel.data<T>(); T* filter_grad_data = nullptr; T* input_grad_data = nullptr; T* transformed_input_grad_data = nullptr; ConvArgs args1{&transformed_input_grad, &transformed_filter_channel, &transformed_output_grad_channel, strides, padding_common, dilations, dtype}; ConvArgs args2{&transformed_input, &transformed_filter_grad_channel, &transformed_output_grad_channel, strides, padding_common, dilations, dtype}; auto handle = dev_ctx.cudnn_handle(); DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW; if (transformed_input.dims().size() == 5) { layout = compute_format == DataLayout::kNHWC ? 
DataLayout::kNDHWC : DataLayout::kNCDHW; } auto layout_tensor = GetCudnnTensorFormat(layout); auto workspace_handle = dev_ctx.cudnn_workspace_handle(); int i_n, i_c, i_d, i_h, i_w; int o_n, o_c, o_d, o_h, o_w; if (compute_format == DataLayout::kNHWC) { GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d, &i_h, &i_w); GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNHWC, &o_n, &o_c, &o_d, &o_h, &o_w); } else { GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w); } int group_offset_in = i_c / groups * i_h * i_w * i_d; int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = transformed_filter_channel.numel() / groups; // ------------------- cudnn backward algorithm --------------------- #ifdef PADDLE_WITH_HIP miopenConvBwdDataAlgorithm_t data_algo = static_cast<miopenConvBwdDataAlgorithm_t>(0); miopenConvBwdWeightsAlgorithm_t filter_algo = static_cast<miopenConvBwdWeightsAlgorithm_t>(0); #else cudnnConvolutionBwdDataAlgo_t data_algo = static_cast<cudnnConvolutionBwdDataAlgo_t>(0); cudnnConvolutionBwdFilterAlgo_t filter_algo = static_cast<cudnnConvolutionBwdFilterAlgo_t>(0); #endif size_t workspace_size = 0; int iwo_groups = groups; int c_groups = 1; #if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) iwo_groups = 1; c_groups = groups; groups = 1; #endif if (input_grad) { // ------------------- cudnn descriptors --------------------- input_grad_data = input_grad->data<T>(); transformed_input_grad_data = transformed_input_grad.data<T>(); args1.handle = handle; args1.idesc.set(transformed_input_grad, layout_tensor); args1.wdesc.set(transformed_filter_channel, layout_tensor, iwo_groups); args1.odesc.set(transformed_output_grad_channel, layout_tensor); args1.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn(), c_groups); #ifdef PADDLE_WITH_HIP using search1 = SearchAlgorithm<miopenConvBwdDataAlgorithm_t>; workspace_size = ::max(workspace_size, search1::GetWorkspaceSize(args1)); data_algo = search1::Find<T>(args1, exhaustive_search, deterministic, workspace_size, ctx); #else using search1 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>; data_algo = search1::Find<T>(args1, exhaustive_search, deterministic, ctx); workspace_size = ::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo)); #endif } if (filter_grad) { // ------------------- cudnn descriptors --------------------- filter_grad_data = transformed_filter_grad_channel.data<T>(); args2.handle = handle; args2.idesc.set(transformed_input, layout_tensor); args2.wdesc.set(transformed_filter_grad_channel, layout_tensor, iwo_groups); args2.odesc.set(transformed_output_grad_channel, layout_tensor); args2.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn(), c_groups); #ifdef PADDLE_WITH_HIP using search2 = SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>; workspace_size = ::max(workspace_size, search2::GetWorkspaceSize(args2)); filter_algo = search2::Find<T>(args2, exhaustive_search, deterministic, workspace_size, ctx); #else using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>; filter_algo = search2::Find<T>(args2, exhaustive_search, deterministic, ctx); workspace_size = ::max(workspace_size, search2::GetWorkspaceSize(args2, filter_algo)); #endif } // ------------------- cudnn conv backward data --------------------- ScalingParamType<T> alpha = 1.0f; ScalingParamType<T> 
beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f; VLOG(4) << "Conv_grad: use_addto = " << ctx.Attr<bool>("use_addto"); if (input_grad) { // When beta is 0, it is unnecessary to reset input_grad. // When beta is 1, the output cannot be reset since addt strategy used. #ifdef PADDLE_WITH_HIP workspace_handle.RunFunc( [&](void* cudnn_workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::miopenConvolutionBackwardData( handle, &alpha, args1.odesc.desc(), output_grad_data, args1.wdesc.desc(), filter_data, args1.cdesc.desc(), data_algo, &beta, args1.idesc.desc(), transformed_input_grad_data, cudnn_workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { workspace_handle.RunFunc( [&](void* cudnn_workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, args1.wdesc.desc(), filter_data + i * group_offset_filter, args1.odesc.desc(), output_grad_data + i * group_offset_out, args1.cdesc.desc(), data_algo, cudnn_workspace_ptr, workspace_size, &beta, args1.idesc.desc(), transformed_input_grad_data + i * group_offset_in)); }, workspace_size); } #endif if (!is_sys_pad) { std::vector<int> starts(transformed_input_channel.dims().size(), 0); std::vector<int> axes(transformed_input_channel.dims().size(), 0); for (size_t i = 0; i < transformed_input_channel.dims().size(); ++i) { starts[i] = input_pad[2 * i]; axes[i] = i; } transformed_input_grad_channel.mutable_data(ctx.GetPlace()); if (transformed_input_channel.dims().size() == 4) { RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>( ctx, &transformed_input_grad, &transformed_input_grad_channel, starts, axes); } else { RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>( ctx, &transformed_input_grad, &transformed_input_grad_channel, starts, axes); } } if (channel_last && compute_format == DataLayout::kNCHW) { TransToChannelLast<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_input_grad_channel, input_grad); } } // filter_grad do not use inplace addto. ScalingParamType<T> beta_filter = 0.0f; // ------------------- cudnn conv backward filter --------------------- if (filter_grad) { // Because beta is zero, it is unnecessary to reset filter_grad. 
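// MIOpen computes the weight gradient in a single call (groups were folded
// into the convolution descriptor earlier); the cuDNN path loops over groups
// with explicit pointer offsets and uses beta_filter == 0, i.e. filter_grad
// is always overwritten rather than accumulated.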
#ifdef PADDLE_WITH_HIP workspace_handle.RunFunc( [&](void* cudnn_workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::miopenConvolutionBackwardWeights( handle, &alpha, args2.odesc.desc(), output_grad_data, args2.idesc.desc(), input_data, args2.cdesc.desc(), filter_algo, &beta, args2.wdesc.desc(), filter_grad_data, cudnn_workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { workspace_handle.RunFunc( [&](void* cudnn_workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, args2.idesc.desc(), input_data + i * group_offset_in, args2.odesc.desc(), output_grad_data + i * group_offset_out, args2.cdesc.desc(), filter_algo, cudnn_workspace_ptr, workspace_size, &beta_filter, args2.wdesc.desc(), filter_grad_data + i * group_offset_filter)); }, workspace_size); } #endif if (compute_format == DataLayout::kNHWC) { TransToChannelFirst<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_filter_grad_channel, filter_grad); } } } }; /* * Inputs: I, W, dO, ddI, ddW * Outputs: ddO, dW, dI * ddo = conv(ddI, W) + conv(I, ddW) * dW = conv_bp_filter(ddI, dO) * dI = conv_bp_data(ddW, dO) */ template <typename T> class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace.")); auto X = ctx.Input<Tensor>("Input"); auto W = ctx.Input<Tensor>("Filter"); auto dO = ctx.Input<Tensor>("DOutput"); auto ddX = ctx.Input<Tensor>("DDInput"); auto ddW = ctx.Input<Tensor>("DDFilter"); auto ddO = ctx.Output<Tensor>("DDOutput"); auto dW = ctx.Output<Tensor>("DFilter"); auto dX = ctx.Output<Tensor>("DInput"); if (ddO) { ddO->mutable_data<T>(ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, T> set_zero; set_zero(dev_ctx, ddO, static_cast<T>(0)); } if (dW) { dW->mutable_data<T>(ctx.GetPlace()); } if (dX) { dX->mutable_data<T>(ctx.GetPlace()); } // const T* x = X->data<T>(); const T* dy = dO->data<T>(); const T* w = W->data<T>(); const T* ddx = nullptr; const T* ddw = nullptr; T *dw, *dx, *ddy; dw = dx = ddy = nullptr; T* transformed_dx = nullptr; const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); int groups = ctx.Attr<int>("groups"); bool exhaustive_search = FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search"); bool deterministic = FLAGS_cudnn_deterministic; auto exhaustive_deterministic = exhaustive_search && deterministic; PADDLE_ENFORCE_EQ(exhaustive_deterministic, false, platform::errors::InvalidArgument( "Cann't set exhaustive_search True and " "FLAGS_cudnn_deterministic True at same time.")); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm"); const std::string data_format = ctx.Attr<std::string>("data_format"); const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); // transform Tensors to channel first----------- Tensor transformed_X_channel(X->type()); Tensor transformed_dO_channel(dO->type()); Tensor transformed_ddX_channel(X->type()); Tensor transformed_ddO_channel(dO->type()); Tensor transformed_dX_channel(X->type()); if (channel_last) { ResizeToChannelFirst<platform::CUDADeviceContext, T>( 
ctx, X, &transformed_X_channel); TransToChannelFirst<platform::CUDADeviceContext, T>( ctx, X, &transformed_X_channel); ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, dO, &transformed_dO_channel); TransToChannelFirst<platform::CUDADeviceContext, T>( ctx, dO, &transformed_dO_channel); if (ddX) { ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, ddX, &transformed_ddX_channel); TransToChannelFirst<platform::CUDADeviceContext, T>( ctx, ddX, &transformed_ddX_channel); } if (ddO) { ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, ddO, &transformed_ddO_channel); } if (dX) { ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, dX, &transformed_dX_channel); transformed_dX_channel.mutable_data<T>(ctx.GetPlace()); } } else { transformed_X_channel = *X; transformed_dO_channel = *dO; if (ddX) { transformed_ddX_channel = *ddX; } if (ddO) { transformed_ddO_channel.ShareDataWith(*ddO); } if (dX) { transformed_dX_channel.ShareDataWith(*dX); } } auto in_dims = transformed_X_channel.dims(); auto filter_dims = W->dims(); framework::DDim in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size()); framework::DDim filter_data_dims = framework::slice_ddim(filter_dims, 2, filter_dims.size()); std::vector<int> ksize = framework::vectorize<int>(filter_data_dims); UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim); Tensor transformed_X(X->type()); Tensor transformed_ddX(X->type()); Tensor transformed_dX(X->type()); std::vector<int> padding_common(data_dim, 0); std::vector<int> input_pad(X->dims().size() * 2, 0); if (!is_sys_pad) { // get pad std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_X_channel.dims()[0]; new_input_shape_vec[1] = transformed_X_channel.dims()[1]; for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]); new_input_shape_vec[i + 2] = transformed_X_channel.dims()[i + 2] + padding_diff[i]; input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } framework::DDim new_input_shape( framework::make_ddim(new_input_shape_vec)); transformed_X.Resize(new_input_shape); transformed_ddX.Resize(new_input_shape); transformed_dX.Resize(new_input_shape); transformed_X = ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_input_shape, dev_ctx); if (ddX) { transformed_ddX = ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_input_shape, dev_ctx); } if (dX) { transformed_dX = ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_input_shape, dev_ctx); } // pad for input const int rank = X->dims().size(); T pad_value(0.0); switch (rank) { case 4: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>( ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; case 5: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>( ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; 
default: PADDLE_THROW(platform::errors::InvalidArgument( "ConvOp only support tensors with 4 or 5 dimensions.")); } } else { transformed_X.ShareDataWith(transformed_X_channel); if (ddX) { transformed_ddX.ShareDataWith(transformed_ddX_channel); } if (dX) { transformed_dX.ShareDataWith(transformed_dX_channel); } if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[2 * i]; } } } const T* x = transformed_X.data<T>(); int iwo_group = groups; int c_group = 1; #if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) iwo_group = 1; c_group = groups; groups = 1; #endif auto dtype = platform::CudnnDataType<T>::type; auto handle = dev_ctx.cudnn_handle(); ConvArgs args1{&transformed_ddX, W, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; ConvArgs args2{ &transformed_X, ddW, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; ConvArgs args3{&transformed_ddX, dW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; ConvArgs args4{ &transformed_dX, ddW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; #ifdef PADDLE_WITH_HIP miopenConvFwdAlgorithm_t fwd_algo1 = static_cast<miopenConvFwdAlgorithm_t>(0); miopenConvFwdAlgorithm_t fwd_algo2 = static_cast<miopenConvFwdAlgorithm_t>(0); miopenConvBwdDataAlgorithm_t data_algo = static_cast<miopenConvBwdDataAlgorithm_t>(0); miopenConvBwdWeightsAlgorithm_t filter_algo = static_cast<miopenConvBwdWeightsAlgorithm_t>(0); #else cudnnConvolutionFwdAlgo_t fwd_algo1 = static_cast<cudnnConvolutionFwdAlgo_t>(0); cudnnConvolutionFwdAlgo_t fwd_algo2 = static_cast<cudnnConvolutionFwdAlgo_t>(0); cudnnConvolutionBwdDataAlgo_t data_algo = static_cast<cudnnConvolutionBwdDataAlgo_t>(0); cudnnConvolutionBwdFilterAlgo_t filter_algo = static_cast<cudnnConvolutionBwdFilterAlgo_t>(0); #endif auto layout = GetCudnnTensorFormat(DataLayout::kNCHW); // ddo = conv(ddI, W) + conv(I, ddW) size_t workspace_size = 0; T* transformed_ddy_channel = nullptr; if (ddO) { ddy = ddO->data<T>(); transformed_ddy_channel = transformed_ddO_channel.data<T>(); if (ddX) { args1.handle = handle; args1.idesc.set(transformed_ddX, iwo_group); args1.wdesc.set(*W, layout, iwo_group); args1.odesc.set(transformed_ddO_channel, iwo_group); args1.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search1 = SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = search1::GetWorkspaceSize(args1); fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, workspace_size, ctx); #else using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, ctx); workspace_size = search1::GetWorkspaceSize(args1, fwd_algo1); #endif } if (ddW) { ddw = ddW->data<T>(); args2.handle = handle; args2.idesc.set(transformed_X, iwo_group); args2.wdesc.set(*ddW, layout, iwo_group); args2.odesc.set(transformed_ddO_channel, iwo_group); args2.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search2 = SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = ::max(workspace_size, search2::GetWorkspaceSize(args2)); fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, workspace_size, ctx); #else using search2 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, ctx); 
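// workspace_size keeps the maximum requirement seen so far; the same single
// workspace allocation is reused for every convolution issued by this kernel
// (both forward passes for ddO as well as the dW and dX convolutions below).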
workspace_size = ::max(workspace_size, search2::GetWorkspaceSize(args2, fwd_algo2)); #endif } } if (dW && ddX) { dw = dW->data<T>(); args3.handle = handle; args3.idesc.set(transformed_ddX, iwo_group); args3.wdesc.set(*dW, layout, iwo_group); args3.odesc.set(transformed_dO_channel, iwo_group); args3.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search3 = SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>; workspace_size = ::max(workspace_size, search3::GetWorkspaceSize(args3)); filter_algo = search3::Find<T>(args3, exhaustive_search, deterministic, workspace_size, ctx); #else using search3 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>; filter_algo = search3::Find<T>(args3, exhaustive_search, deterministic, ctx); workspace_size = ::max(workspace_size, search3::GetWorkspaceSize(args3, filter_algo)); #endif } if (ddW && dX) { transformed_dx = transformed_dX.data<T>(); args4.handle = handle; args4.idesc.set(transformed_dX, iwo_group); args4.wdesc.set(*ddW, layout, iwo_group); args4.odesc.set(transformed_dO_channel, iwo_group); args4.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search4 = SearchAlgorithm<miopenConvBwdDataAlgorithm_t>; workspace_size = ::max(workspace_size, search4::GetWorkspaceSize(args4)); data_algo = search4::Find<T>(args4, exhaustive_search, deterministic, workspace_size, ctx); #else using search4 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>; data_algo = search4::Find<T>(args4, exhaustive_search, deterministic, ctx); workspace_size = ::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo)); #endif } int i_n, i_c, i_d, i_h, i_w; GetNCDHW(transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); int o_n, o_c, o_d, o_h, o_w; GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w); int group_offset_in = i_c / groups * i_h * i_w * i_d; int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = W->numel() / groups; ScalingParamType<T> alpha = 1.0f; ScalingParamType<T> beta = 0.0f; // NOTE(zhiqiu): inplace addto is not supportted in double grad yet. // ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 
1.0f : // 0.0f; // VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto"); auto wkspace_handle = dev_ctx.cudnn_workspace_handle(); if (ddO) { if (ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::miopenConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx, args1.wdesc.desc(), w, args1.cdesc.desc(), fwd_algo1, &beta, args1.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx + i * group_offset_in, args1.wdesc.desc(), w + i * group_offset_filter, args1.cdesc.desc(), fwd_algo1, workspace_ptr, workspace_size, &beta, args1.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (ddW) { #ifdef PADDLE_WITH_HIP // MIOPEN ONLY support beta to be 0.0f wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::miopenConvolutionForward( handle, &alpha, args2.idesc.desc(), x, args2.wdesc.desc(), ddw, args2.cdesc.desc(), fwd_algo2, &beta, args2.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionForward( handle, &alpha, args2.idesc.desc(), x + i * group_offset_in, args2.wdesc.desc(), ddw + i * group_offset_filter, args2.cdesc.desc(), fwd_algo2, workspace_ptr, workspace_size, &alpha, args2.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (channel_last) { TransToChannelLast<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_ddO_channel, ddO); } } T* transformed_dy_channel = transformed_dO_channel.data<T>(); if (dW && ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::miopenConvolutionBackwardWeights( handle, &alpha, args3.odesc.desc(), transformed_dy_channel, args3.idesc.desc(), ddx, args3.cdesc.desc(), filter_algo, &beta, args3.wdesc.desc(), dw, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, args3.idesc.desc(), ddx + i * group_offset_in, args3.odesc.desc(), transformed_dy_channel + i * group_offset_out, args3.cdesc.desc(), filter_algo, workspace_ptr, workspace_size, &beta, args3.wdesc.desc(), dw + i * group_offset_filter)); }, workspace_size); } #endif } if (dX && ddW) { ddw = ddW->data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::miopenConvolutionBackwardData( handle, &alpha, args4.odesc.desc(), transformed_dy_channel, args4.wdesc.desc(), ddw, args4.cdesc.desc(), data_algo, &beta, args4.idesc.desc(), transformed_dx, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, args4.wdesc.desc(), ddw + i * group_offset_filter, args4.odesc.desc(), 
transformed_dy_channel + i * group_offset_out, args4.cdesc.desc(), data_algo, workspace_ptr, workspace_size, &beta, args4.idesc.desc(), transformed_dx + i * group_offset_in)); }, workspace_size); } #endif if (!is_sys_pad) { // reverse padded input std::vector<int> starts(X->dims().size(), 0); std::vector<int> axes(X->dims().size(), 0); for (size_t i = 0; i < X->dims().size(); ++i) { starts[i] = input_pad[2 * i]; axes[i] = i; } if (X->dims().size() == 4) { RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } else { RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } } if (channel_last) { TransToChannelLast<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_dX_channel, dX); } } } }; } // namespace operators } // namespace paddle namespace plat = paddle::platform; #ifdef PADDLE_WITH_HIP // MIOPEN do not support double REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvOpKernel<float>, paddle::operators::CUDNNConvOpKernel<plat::float16>); REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvGradOpKernel<float>, paddle::operators::CUDNNConvGradOpKernel<plat::float16>); REGISTER_OP_KERNEL( conv2d_grad_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvDoubleGradOpKernel<float>, paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>); // ROCM has limit thread in depthwise_conv.cu and willl result in accuracy issue // Use depthwise_conv2d in MIOPEN to resolve this issue REGISTER_OP_KERNEL(depthwise_conv2d, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvOpKernel<float>, paddle::operators::CUDNNConvOpKernel<plat::float16>); REGISTER_OP_KERNEL(depthwise_conv2d_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvGradOpKernel<float>, paddle::operators::CUDNNConvGradOpKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL( depthwise_conv2d_grad_grad, paddle::operators::CUDNNConvDoubleGradOpKernel<float>, paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>); REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvOpKernel<float>, paddle::operators::CUDNNConvOpKernel<plat::float16>); REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvGradOpKernel<float>); REGISTER_OP_KERNEL( conv3d_grad_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvDoubleGradOpKernel<float>, paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>); #else REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvOpKernel<float>, paddle::operators::CUDNNConvOpKernel<double>, paddle::operators::CUDNNConvOpKernel<plat::float16>); REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvGradOpKernel<float>, paddle::operators::CUDNNConvGradOpKernel<double>, paddle::operators::CUDNNConvGradOpKernel<plat::float16>); REGISTER_OP_KERNEL( conv2d_grad_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvDoubleGradOpKernel<float>, paddle::operators::CUDNNConvDoubleGradOpKernel<double>, paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL( depthwise_conv2d_grad_grad, paddle::operators::CUDNNConvDoubleGradOpKernel<float>, paddle::operators::CUDNNConvDoubleGradOpKernel<double>, paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>); REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvOpKernel<float>, 
paddle::operators::CUDNNConvOpKernel<double>, paddle::operators::CUDNNConvOpKernel<plat::float16>); REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvGradOpKernel<float>, paddle::operators::CUDNNConvGradOpKernel<double>); REGISTER_OP_KERNEL( conv3d_grad_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvDoubleGradOpKernel<float>, paddle::operators::CUDNNConvDoubleGradOpKernel<double>, paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>); #endif
b68572eb81e56a606b71173f6e43541a4937fb8e.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the spopecific language governing permissions and limitations under the License. */ #include <utility> #include <vector> #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/memory/memory.h" #ifdef PADDLE_WITH_HIP #include "paddle/fluid/operators/conv_miopen_helper.h" #else #include "paddle/fluid/operators/conv_cudnn_helper.h" #endif #include "paddle/fluid/operators/conv_op.h" #include "paddle/fluid/operators/math/padding.h" #include "paddle/fluid/platform/cudnn_workspace_helper.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/profiler.h" DECLARE_bool(cudnn_deterministic); DECLARE_uint64(conv_workspace_size_limit); DECLARE_bool(cudnn_exhaustive_search); namespace paddle { namespace operators { using Tensor = framework::Tensor; using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; using ScopedFilterDescriptor = platform::ScopedFilterDescriptor; using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor; using DataLayout = platform::DataLayout; static inline bool IsVoltaOrLater(const platform::CUDADeviceContext& dev_ctx) { return dev_ctx.GetComputeCapability() >= 70; } template <typename T> class CUDNNConvOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace.")); const Tensor* input = ctx.Input<Tensor>("Input"); auto* filter = ctx.Input<Tensor>("Filter"); auto* output = ctx.Output<Tensor>("Output"); output->mutable_data<T>(ctx.GetPlace()); const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); int groups = ctx.Attr<int>("groups"); bool exhaustive_search = FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search"); bool deterministic = FLAGS_cudnn_deterministic; auto exhaustive_deterministic = exhaustive_search && deterministic; PADDLE_ENFORCE_EQ(exhaustive_deterministic, false, platform::errors::InvalidArgument( "Cann't set exhaustive_search True and " "FLAGS_cudnn_deterministic True at same time.")); const std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm"); const std::string data_format = ctx.Attr<std::string>("data_format"); const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); auto dtype = platform::CudnnDataType<T>::type; #ifdef PADDLE_WITH_HIP // HIP MIOPEN ONLY SUPPORT NCHW format auto compute_format = DataLayout::kNCHW; #else // Tensor Core introduced from Volta GPUs supports more faster conv op // with FP16 in NHWC data format. 
const bool compute_in_nhwc = dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx); // We will only do data format conversion from NHWC to NCHW. // cudnn will convert NCHW to NHWC automatically on Tensor Core. auto compute_format = compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW; #endif VLOG(3) << "Compute ConvOp with cuDNN:" << " data_format=" << data_format << " compute_format=" << (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW"); // ------------ transformed tensor ----------- Tensor transformed_input_channel(input->type()); Tensor transformed_output(output->type()); Tensor transformed_filter_channel(filter->type()); T* output_data = nullptr; if (channel_last && compute_format == DataLayout::kNCHW) { VLOG(3) << "Transform input tensor from NHWC to NCHW."; ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, input, &transformed_input_channel); TransToChannelFirst<platform::CUDADeviceContext, T>( ctx, input, &transformed_input_channel); ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, output, &transformed_output); } else { transformed_input_channel.ShareDataWith(*input); transformed_output.ShareDataWith(*output); } if (compute_format == DataLayout::kNHWC) { VLOG(3) << "Transform filter tensor from NCHW to NHWC."; ResizeToChannelLast<platform::CUDADeviceContext, T>( ctx, filter, &transformed_filter_channel); TransToChannelLast<platform::CUDADeviceContext, T>( ctx, filter, &transformed_filter_channel); } else { transformed_filter_channel.ShareDataWith(*filter); } output_data = transformed_output.data<T>(); // update padding and dilation auto in_dims = transformed_input_channel.dims(); auto filter_dims = transformed_filter_channel.dims(); framework::DDim in_data_dims; framework::DDim filter_data_dims; if (compute_format == DataLayout::kNCHW) { in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size()); filter_data_dims = framework::slice_ddim(filter_dims, 2, filter_dims.size()); } else { in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1); filter_data_dims = framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1); } std::vector<int> ksize = framework::vectorize<int>(filter_data_dims); UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim); Tensor transformed_input; std::vector<int> padding_common(data_dim, 0); if (!is_sys_pad) { std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_input_channel.dims()[0]; if (compute_format == DataLayout::kNCHW) { new_input_shape_vec[1] = transformed_input_channel.dims()[1]; } else { new_input_shape_vec[data_dim + 1] = transformed_input_channel.dims()[data_dim + 1]; } std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0); for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]); if (compute_format == DataLayout::kNCHW) { new_input_shape_vec[i + 2] = transformed_input_channel.dims()[i + 2] + padding_diff[i]; } else { new_input_shape_vec[i + 1] = transformed_input_channel.dims()[i + 1] + padding_diff[i]; } if (compute_format == DataLayout::kNCHW) { input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } else { input_pad[2 * i + 2] = paddings[2 * i] - 
padding_common[i]; input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i]; } } framework::DDim new_input_shape( framework::make_ddim(new_input_shape_vec)); transformed_input.Resize(new_input_shape); auto& dev_ctx = ctx.template device_context<paddle::platform::CUDADeviceContext>(); transformed_input = ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_input_shape, dev_ctx); const int rank = transformed_input_channel.dims().size(); T pad_value(0.0); switch (rank) { case 4: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>( ctx, input_pad, transformed_input_channel, pad_value, &transformed_input); } break; case 5: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>( ctx, input_pad, transformed_input_channel, pad_value, &transformed_input); } break; default: PADDLE_THROW(platform::errors::InvalidArgument( "ConvOp only support tensors with 4 or 5 dimensions.")); } } else { transformed_input.ShareDataWith(transformed_input_channel); if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[2 * i]; } } } const T* input_data = transformed_input.data<T>(); const T* filter_data = transformed_filter_channel.data<T>(); // ------------------- cudnn descriptors --------------------- ConvArgs args{&transformed_input, &transformed_filter_channel, &transformed_output, strides, padding_common, dilations, dtype}; auto handle = dev_ctx.cudnn_handle(); auto workspace_handle = dev_ctx.cudnn_workspace_handle(); DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW; if (transformed_input.dims().size() == 5) { layout = compute_format == DataLayout::kNHWC ? DataLayout::kNDHWC : DataLayout::kNCDHW; } auto layout_format = GetCudnnTensorFormat(layout); args.handle = handle; #ifdef PADDLE_WITH_HIP // MIOPEN need to set groups in cdesc in miopen_desc.h args.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn(), groups); #else args.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn()); #endif #if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION_MIN(7, 0, 1) // cudnn 7 can support groups, no need to do it manually // FIXME(typhoonzero): find a better way to disable groups // rather than setting it to 1. PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnSetConvolutionGroupCount(args.cdesc.desc(), groups)); groups = 1; #endif #ifdef PADDLE_WITH_HIP // MIOPEN do not set groups in wdesc after set groups in cdesc groups = 1; #endif args.idesc.set(transformed_input, layout_format); args.wdesc.set(transformed_filter_channel, layout_format, groups); args.odesc.set(transformed_output, layout_format); int i_n, i_c, i_d, i_h, i_w; int o_n, o_c, o_d, o_h, o_w; if (compute_format == DataLayout::kNHWC) { GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d, &i_h, &i_w); GetNCDHW(transformed_output.dims(), DataLayout::kNHWC, &o_n, &o_c, &o_d, &o_h, &o_w); } else { GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); GetNCDHW(transformed_output.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w); } int group_offset_in = i_c / groups * i_h * i_w * i_d; int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = transformed_filter_channel.numel() / groups; // ------------------- cudnn conv workspace --------------------- size_t workspace_size = 0; // final workspace to allocate. 
// ------------------- cudnn conv algorithm --------------------- #ifdef PADDLE_WITH_HIP miopenConvFwdAlgorithm_t algo{}; using search = SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = search::GetWorkspaceSize(args); algo = search::Find<T>(args, exhaustive_search, false, workspace_size, ctx); #else cudnnConvolutionFwdAlgo_t algo{}; using search = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; algo = search::Find<T>(args, exhaustive_search, false, ctx); workspace_size = search::GetWorkspaceSize(args, algo); #endif #if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION_MIN(7, 0, 1) // when groups > 1, SearchAlgorithm find algo is CUDNN_CONVOLUTION_\ // FWD_ALGO_WINOGRAD_NONFUSED, but this kind of algorithm is unstable // in forward computation, so change the algorithm to CUDNN_CONVOLUTION_\ // FWD_ALGO_IMPLICIT_GEMM manually. if (ctx.Attr<int>("groups") > 1) { algo = static_cast<cudnnConvolutionFwdAlgo_t>(0); } #endif // ------------------- cudnn conv forward --------------------- ScalingParamType<T> alpha = 1.0f; ScalingParamType<T> beta = 0.0f; // NOTE(zhiqiu): inplace addto is not supportted in double grad yet. // ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f; // VLOG(4) << "Conv: use_addto = " << ctx.Attr<bool>("use_addto"); #ifdef PADDLE_WITH_HIP workspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::miopenConvolutionForward( handle, &alpha, args.idesc.desc(), input_data, args.wdesc.desc(), filter_data, args.cdesc.desc(), algo, &beta, args.odesc.desc(), output_data, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { workspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionForward( handle, &alpha, args.idesc.desc(), input_data + i * group_offset_in, args.wdesc.desc(), filter_data + i * group_offset_filter, args.cdesc.desc(), algo, workspace_ptr, workspace_size, &beta, args.odesc.desc(), output_data + i * group_offset_out)); }, workspace_size); } #endif if (channel_last && compute_format == DataLayout::kNCHW) { TransToChannelLast<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_output, output); } } }; template <typename T> class CUDNNConvGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace.")); auto input = ctx.Input<Tensor>("Input"); auto filter = ctx.Input<Tensor>("Filter"); auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input")); auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter")); if (input_grad) { input_grad->mutable_data<T>(ctx.GetPlace()); } if (filter_grad) { filter_grad->mutable_data<T>(ctx.GetPlace()); } std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm"); int groups = ctx.Attr<int>("groups"); bool exhaustive_search = FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search"); bool deterministic = FLAGS_cudnn_deterministic; auto exhaustive_deterministic = exhaustive_search && 
deterministic; PADDLE_ENFORCE_EQ(exhaustive_deterministic, false, platform::errors::InvalidArgument( "Cann't set exhaustive_search True and " "FLAGS_cudnn_deterministic True at same time.")); const std::string data_format = ctx.Attr<std::string>("data_format"); const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); auto dtype = platform::CudnnDataType<T>::type; #ifdef PADDLE_WITH_HIP // HIP MIOPEN ONLY SUPPORT NCHW format auto compute_format = DataLayout::kNCHW; #else const bool compute_in_nhwc = dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx); auto compute_format = compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW; #endif VLOG(3) << "Compute ConvGradOp with cuDNN:" << " data_format=" << data_format << " compute_format=" << (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW"); // transform Tensor Tensor transformed_input_channel(input->type()); Tensor transformed_output_grad_channel(output_grad->type()); Tensor transformed_input_grad_channel(input->type()); Tensor transformed_filter_channel(filter->type()); Tensor transformed_filter_grad_channel(filter->type()); if (channel_last && compute_format == DataLayout::kNCHW) { VLOG(3) << "Transform input, output_grad, input_grad and tensor from " "NHWC to NCHW."; ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, input, &transformed_input_channel); TransToChannelFirst<platform::CUDADeviceContext, T>( ctx, input, &transformed_input_channel); ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, output_grad, &transformed_output_grad_channel); TransToChannelFirst<platform::CUDADeviceContext, T>( ctx, output_grad, &transformed_output_grad_channel); if (input_grad) { ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, input_grad, &transformed_input_grad_channel); // NOTE(zhiqiu): If inplace_addto strategy is enabled, we need to copy // the data of input_grad to transformed_input_grad_channel. 
if (ctx.Attr<bool>("use_addto")) { TransToChannelFirst<platform::CUDADeviceContext, T>( ctx, input_grad, &transformed_input_grad_channel); } } } else { transformed_input_channel.ShareDataWith(*input); transformed_output_grad_channel.ShareDataWith(*output_grad); if (input_grad) { transformed_input_grad_channel.ShareDataWith(*input_grad); } } if (compute_format == DataLayout::kNHWC) { VLOG(3) << "Transform filter and filter_grad tensor from NCHW to NHWC."; ResizeToChannelLast<platform::CUDADeviceContext, T>( ctx, filter, &transformed_filter_channel); TransToChannelLast<platform::CUDADeviceContext, T>( ctx, filter, &transformed_filter_channel); if (filter_grad) { ResizeToChannelLast<platform::CUDADeviceContext, T>( ctx, filter_grad, &transformed_filter_grad_channel); } } else { transformed_filter_channel.ShareDataWith(*filter); if (filter_grad) { transformed_filter_grad_channel.ShareDataWith(*filter_grad); } } // update paddings auto in_dims = transformed_input_channel.dims(); auto filter_dims = transformed_filter_channel.dims(); framework::DDim in_data_dims; framework::DDim filter_data_dims; if (compute_format == DataLayout::kNCHW) { in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size()); filter_data_dims = framework::slice_ddim(filter_dims, 2, filter_dims.size()); } else { in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1); filter_data_dims = framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1); } std::vector<int> ksize = framework::vectorize<int>(filter_data_dims); UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); // cuDNN only supports padding the same amount on every dimension. // So we create a new padded input tensor. int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim); Tensor transformed_input(input->type()); Tensor transformed_input_grad(input->type()); std::vector<int> padding_common(data_dim, 0); std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0); if (!is_sys_pad) { // get pad std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_input_channel.dims()[0]; if (compute_format == DataLayout::kNCHW) { new_input_shape_vec[1] = transformed_input_channel.dims()[1]; } else { new_input_shape_vec[data_dim + 1] = transformed_input_channel.dims()[data_dim + 1]; } for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]); if (compute_format == DataLayout::kNCHW) { new_input_shape_vec[i + 2] = transformed_input_channel.dims()[i + 2] + padding_diff[i]; } else { new_input_shape_vec[i + 1] = transformed_input_channel.dims()[i + 1] + padding_diff[i]; } if (compute_format == DataLayout::kNCHW) { input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } else { input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i]; } } framework::DDim new_input_shape( framework::make_ddim(new_input_shape_vec)); transformed_input.Resize(new_input_shape); transformed_input_grad.Resize(new_input_shape); auto& dev_ctx = ctx.template device_context<paddle::platform::CUDADeviceContext>(); transformed_input = ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_input_shape, dev_ctx); if (input_grad) { 
transformed_input_grad = ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_input_shape, dev_ctx); } // pad for input const int rank = transformed_input_channel.dims().size(); T pad_value(0.0); switch (rank) { case 4: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>( ctx, input_pad, transformed_input_channel, pad_value, &transformed_input); } break; case 5: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>( ctx, input_pad, transformed_input_channel, pad_value, &transformed_input); } break; default: PADDLE_THROW(platform::errors::InvalidArgument( "ConvOp only support tensors with 4 or 5 dimensions.")); } } else { transformed_input.ShareDataWith(transformed_input_channel); if (input_grad) { transformed_input_grad.ShareDataWith(transformed_input_grad_channel); } if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[2 * i]; } } } const T* input_data = transformed_input.data<T>(); const T* output_grad_data = transformed_output_grad_channel.data<T>(); const T* filter_data = transformed_filter_channel.data<T>(); T* filter_grad_data = nullptr; T* input_grad_data = nullptr; T* transformed_input_grad_data = nullptr; ConvArgs args1{&transformed_input_grad, &transformed_filter_channel, &transformed_output_grad_channel, strides, padding_common, dilations, dtype}; ConvArgs args2{&transformed_input, &transformed_filter_grad_channel, &transformed_output_grad_channel, strides, padding_common, dilations, dtype}; auto handle = dev_ctx.cudnn_handle(); DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW; if (transformed_input.dims().size() == 5) { layout = compute_format == DataLayout::kNHWC ? 
DataLayout::kNDHWC : DataLayout::kNCDHW; } auto layout_tensor = GetCudnnTensorFormat(layout); auto workspace_handle = dev_ctx.cudnn_workspace_handle(); int i_n, i_c, i_d, i_h, i_w; int o_n, o_c, o_d, o_h, o_w; if (compute_format == DataLayout::kNHWC) { GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d, &i_h, &i_w); GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNHWC, &o_n, &o_c, &o_d, &o_h, &o_w); } else { GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w); } int group_offset_in = i_c / groups * i_h * i_w * i_d; int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = transformed_filter_channel.numel() / groups; // ------------------- cudnn backward algorithm --------------------- #ifdef PADDLE_WITH_HIP miopenConvBwdDataAlgorithm_t data_algo = static_cast<miopenConvBwdDataAlgorithm_t>(0); miopenConvBwdWeightsAlgorithm_t filter_algo = static_cast<miopenConvBwdWeightsAlgorithm_t>(0); #else cudnnConvolutionBwdDataAlgo_t data_algo = static_cast<cudnnConvolutionBwdDataAlgo_t>(0); cudnnConvolutionBwdFilterAlgo_t filter_algo = static_cast<cudnnConvolutionBwdFilterAlgo_t>(0); #endif size_t workspace_size = 0; int iwo_groups = groups; int c_groups = 1; #if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) iwo_groups = 1; c_groups = groups; groups = 1; #endif if (input_grad) { // ------------------- cudnn descriptors --------------------- input_grad_data = input_grad->data<T>(); transformed_input_grad_data = transformed_input_grad.data<T>(); args1.handle = handle; args1.idesc.set(transformed_input_grad, layout_tensor); args1.wdesc.set(transformed_filter_channel, layout_tensor, iwo_groups); args1.odesc.set(transformed_output_grad_channel, layout_tensor); args1.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn(), c_groups); #ifdef PADDLE_WITH_HIP using search1 = SearchAlgorithm<miopenConvBwdDataAlgorithm_t>; workspace_size = std::max(workspace_size, search1::GetWorkspaceSize(args1)); data_algo = search1::Find<T>(args1, exhaustive_search, deterministic, workspace_size, ctx); #else using search1 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>; data_algo = search1::Find<T>(args1, exhaustive_search, deterministic, ctx); workspace_size = std::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo)); #endif } if (filter_grad) { // ------------------- cudnn descriptors --------------------- filter_grad_data = transformed_filter_grad_channel.data<T>(); args2.handle = handle; args2.idesc.set(transformed_input, layout_tensor); args2.wdesc.set(transformed_filter_grad_channel, layout_tensor, iwo_groups); args2.odesc.set(transformed_output_grad_channel, layout_tensor); args2.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn(), c_groups); #ifdef PADDLE_WITH_HIP using search2 = SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>; workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2)); filter_algo = search2::Find<T>(args2, exhaustive_search, deterministic, workspace_size, ctx); #else using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>; filter_algo = search2::Find<T>(args2, exhaustive_search, deterministic, ctx); workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2, filter_algo)); #endif } // ------------------- cudnn conv backward data --------------------- ScalingParamType<T> alpha = 1.0f; 
ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f; VLOG(4) << "Conv_grad: use_addto = " << ctx.Attr<bool>("use_addto"); if (input_grad) { // When beta is 0, it is unnecessary to reset input_grad. // When beta is 1, the output cannot be reset since addt strategy used. #ifdef PADDLE_WITH_HIP workspace_handle.RunFunc( [&](void* cudnn_workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::miopenConvolutionBackwardData( handle, &alpha, args1.odesc.desc(), output_grad_data, args1.wdesc.desc(), filter_data, args1.cdesc.desc(), data_algo, &beta, args1.idesc.desc(), transformed_input_grad_data, cudnn_workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { workspace_handle.RunFunc( [&](void* cudnn_workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, args1.wdesc.desc(), filter_data + i * group_offset_filter, args1.odesc.desc(), output_grad_data + i * group_offset_out, args1.cdesc.desc(), data_algo, cudnn_workspace_ptr, workspace_size, &beta, args1.idesc.desc(), transformed_input_grad_data + i * group_offset_in)); }, workspace_size); } #endif if (!is_sys_pad) { std::vector<int> starts(transformed_input_channel.dims().size(), 0); std::vector<int> axes(transformed_input_channel.dims().size(), 0); for (size_t i = 0; i < transformed_input_channel.dims().size(); ++i) { starts[i] = input_pad[2 * i]; axes[i] = i; } transformed_input_grad_channel.mutable_data(ctx.GetPlace()); if (transformed_input_channel.dims().size() == 4) { RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>( ctx, &transformed_input_grad, &transformed_input_grad_channel, starts, axes); } else { RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>( ctx, &transformed_input_grad, &transformed_input_grad_channel, starts, axes); } } if (channel_last && compute_format == DataLayout::kNCHW) { TransToChannelLast<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_input_grad_channel, input_grad); } } // filter_grad do not use inplace addto. ScalingParamType<T> beta_filter = 0.0f; // ------------------- cudnn conv backward filter --------------------- if (filter_grad) { // Because beta is zero, it is unnecessary to reset filter_grad. 
#ifdef PADDLE_WITH_HIP workspace_handle.RunFunc( [&](void* cudnn_workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::miopenConvolutionBackwardWeights( handle, &alpha, args2.odesc.desc(), output_grad_data, args2.idesc.desc(), input_data, args2.cdesc.desc(), filter_algo, &beta, args2.wdesc.desc(), filter_grad_data, cudnn_workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { workspace_handle.RunFunc( [&](void* cudnn_workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, args2.idesc.desc(), input_data + i * group_offset_in, args2.odesc.desc(), output_grad_data + i * group_offset_out, args2.cdesc.desc(), filter_algo, cudnn_workspace_ptr, workspace_size, &beta_filter, args2.wdesc.desc(), filter_grad_data + i * group_offset_filter)); }, workspace_size); } #endif if (compute_format == DataLayout::kNHWC) { TransToChannelFirst<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_filter_grad_channel, filter_grad); } } } }; /* * Inputs: I, W, dO, ddI, ddW * Outputs: ddO, dW, dI * ddo = conv(ddI, W) + conv(I, ddW) * dW = conv_bp_filter(ddI, dO) * dI = conv_bp_data(ddW, dO) */ template <typename T> class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace.")); auto X = ctx.Input<Tensor>("Input"); auto W = ctx.Input<Tensor>("Filter"); auto dO = ctx.Input<Tensor>("DOutput"); auto ddX = ctx.Input<Tensor>("DDInput"); auto ddW = ctx.Input<Tensor>("DDFilter"); auto ddO = ctx.Output<Tensor>("DDOutput"); auto dW = ctx.Output<Tensor>("DFilter"); auto dX = ctx.Output<Tensor>("DInput"); if (ddO) { ddO->mutable_data<T>(ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, T> set_zero; set_zero(dev_ctx, ddO, static_cast<T>(0)); } if (dW) { dW->mutable_data<T>(ctx.GetPlace()); } if (dX) { dX->mutable_data<T>(ctx.GetPlace()); } // const T* x = X->data<T>(); const T* dy = dO->data<T>(); const T* w = W->data<T>(); const T* ddx = nullptr; const T* ddw = nullptr; T *dw, *dx, *ddy; dw = dx = ddy = nullptr; T* transformed_dx = nullptr; const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); int groups = ctx.Attr<int>("groups"); bool exhaustive_search = FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search"); bool deterministic = FLAGS_cudnn_deterministic; auto exhaustive_deterministic = exhaustive_search && deterministic; PADDLE_ENFORCE_EQ(exhaustive_deterministic, false, platform::errors::InvalidArgument( "Cann't set exhaustive_search True and " "FLAGS_cudnn_deterministic True at same time.")); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm"); const std::string data_format = ctx.Attr<std::string>("data_format"); const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); // transform Tensors to channel first----------- Tensor transformed_X_channel(X->type()); Tensor transformed_dO_channel(dO->type()); Tensor transformed_ddX_channel(X->type()); Tensor transformed_ddO_channel(dO->type()); Tensor transformed_dX_channel(X->type()); if (channel_last) { ResizeToChannelFirst<platform::CUDADeviceContext, T>( 
ctx, X, &transformed_X_channel); TransToChannelFirst<platform::CUDADeviceContext, T>( ctx, X, &transformed_X_channel); ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, dO, &transformed_dO_channel); TransToChannelFirst<platform::CUDADeviceContext, T>( ctx, dO, &transformed_dO_channel); if (ddX) { ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, ddX, &transformed_ddX_channel); TransToChannelFirst<platform::CUDADeviceContext, T>( ctx, ddX, &transformed_ddX_channel); } if (ddO) { ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, ddO, &transformed_ddO_channel); } if (dX) { ResizeToChannelFirst<platform::CUDADeviceContext, T>( ctx, dX, &transformed_dX_channel); transformed_dX_channel.mutable_data<T>(ctx.GetPlace()); } } else { transformed_X_channel = *X; transformed_dO_channel = *dO; if (ddX) { transformed_ddX_channel = *ddX; } if (ddO) { transformed_ddO_channel.ShareDataWith(*ddO); } if (dX) { transformed_dX_channel.ShareDataWith(*dX); } } auto in_dims = transformed_X_channel.dims(); auto filter_dims = W->dims(); framework::DDim in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size()); framework::DDim filter_data_dims = framework::slice_ddim(filter_dims, 2, filter_dims.size()); std::vector<int> ksize = framework::vectorize<int>(filter_data_dims); UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim); Tensor transformed_X(X->type()); Tensor transformed_ddX(X->type()); Tensor transformed_dX(X->type()); std::vector<int> padding_common(data_dim, 0); std::vector<int> input_pad(X->dims().size() * 2, 0); if (!is_sys_pad) { // get pad std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_X_channel.dims()[0]; new_input_shape_vec[1] = transformed_X_channel.dims()[1]; for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]); new_input_shape_vec[i + 2] = transformed_X_channel.dims()[i + 2] + padding_diff[i]; input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } framework::DDim new_input_shape( framework::make_ddim(new_input_shape_vec)); transformed_X.Resize(new_input_shape); transformed_ddX.Resize(new_input_shape); transformed_dX.Resize(new_input_shape); transformed_X = ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_input_shape, dev_ctx); if (ddX) { transformed_ddX = ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_input_shape, dev_ctx); } if (dX) { transformed_dX = ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>( new_input_shape, dev_ctx); } // pad for input const int rank = X->dims().size(); T pad_value(0.0); switch (rank) { case 4: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>( ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; case 5: { math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>( ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; 
default: PADDLE_THROW(platform::errors::InvalidArgument( "ConvOp only support tensors with 4 or 5 dimensions.")); } } else { transformed_X.ShareDataWith(transformed_X_channel); if (ddX) { transformed_ddX.ShareDataWith(transformed_ddX_channel); } if (dX) { transformed_dX.ShareDataWith(transformed_dX_channel); } if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[2 * i]; } } } const T* x = transformed_X.data<T>(); int iwo_group = groups; int c_group = 1; #if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) iwo_group = 1; c_group = groups; groups = 1; #endif auto dtype = platform::CudnnDataType<T>::type; auto handle = dev_ctx.cudnn_handle(); ConvArgs args1{&transformed_ddX, W, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; ConvArgs args2{ &transformed_X, ddW, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; ConvArgs args3{&transformed_ddX, dW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; ConvArgs args4{ &transformed_dX, ddW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; #ifdef PADDLE_WITH_HIP miopenConvFwdAlgorithm_t fwd_algo1 = static_cast<miopenConvFwdAlgorithm_t>(0); miopenConvFwdAlgorithm_t fwd_algo2 = static_cast<miopenConvFwdAlgorithm_t>(0); miopenConvBwdDataAlgorithm_t data_algo = static_cast<miopenConvBwdDataAlgorithm_t>(0); miopenConvBwdWeightsAlgorithm_t filter_algo = static_cast<miopenConvBwdWeightsAlgorithm_t>(0); #else cudnnConvolutionFwdAlgo_t fwd_algo1 = static_cast<cudnnConvolutionFwdAlgo_t>(0); cudnnConvolutionFwdAlgo_t fwd_algo2 = static_cast<cudnnConvolutionFwdAlgo_t>(0); cudnnConvolutionBwdDataAlgo_t data_algo = static_cast<cudnnConvolutionBwdDataAlgo_t>(0); cudnnConvolutionBwdFilterAlgo_t filter_algo = static_cast<cudnnConvolutionBwdFilterAlgo_t>(0); #endif auto layout = GetCudnnTensorFormat(DataLayout::kNCHW); // ddo = conv(ddI, W) + conv(I, ddW) size_t workspace_size = 0; T* transformed_ddy_channel = nullptr; if (ddO) { ddy = ddO->data<T>(); transformed_ddy_channel = transformed_ddO_channel.data<T>(); if (ddX) { args1.handle = handle; args1.idesc.set(transformed_ddX, iwo_group); args1.wdesc.set(*W, layout, iwo_group); args1.odesc.set(transformed_ddO_channel, iwo_group); args1.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search1 = SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = search1::GetWorkspaceSize(args1); fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, workspace_size, ctx); #else using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, ctx); workspace_size = search1::GetWorkspaceSize(args1, fwd_algo1); #endif } if (ddW) { ddw = ddW->data<T>(); args2.handle = handle; args2.idesc.set(transformed_X, iwo_group); args2.wdesc.set(*ddW, layout, iwo_group); args2.odesc.set(transformed_ddO_channel, iwo_group); args2.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search2 = SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2)); fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, workspace_size, ctx); #else using search2 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, ctx); 
workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2, fwd_algo2)); #endif } } if (dW && ddX) { dw = dW->data<T>(); args3.handle = handle; args3.idesc.set(transformed_ddX, iwo_group); args3.wdesc.set(*dW, layout, iwo_group); args3.odesc.set(transformed_dO_channel, iwo_group); args3.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search3 = SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>; workspace_size = std::max(workspace_size, search3::GetWorkspaceSize(args3)); filter_algo = search3::Find<T>(args3, exhaustive_search, deterministic, workspace_size, ctx); #else using search3 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>; filter_algo = search3::Find<T>(args3, exhaustive_search, deterministic, ctx); workspace_size = std::max(workspace_size, search3::GetWorkspaceSize(args3, filter_algo)); #endif } if (ddW && dX) { transformed_dx = transformed_dX.data<T>(); args4.handle = handle; args4.idesc.set(transformed_dX, iwo_group); args4.wdesc.set(*ddW, layout, iwo_group); args4.odesc.set(transformed_dO_channel, iwo_group); args4.cdesc.set(dtype, padding_common, strides, dilations, platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search4 = SearchAlgorithm<miopenConvBwdDataAlgorithm_t>; workspace_size = std::max(workspace_size, search4::GetWorkspaceSize(args4)); data_algo = search4::Find<T>(args4, exhaustive_search, deterministic, workspace_size, ctx); #else using search4 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>; data_algo = search4::Find<T>(args4, exhaustive_search, deterministic, ctx); workspace_size = std::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo)); #endif } int i_n, i_c, i_d, i_h, i_w; GetNCDHW(transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); int o_n, o_c, o_d, o_h, o_w; GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w); int group_offset_in = i_c / groups * i_h * i_w * i_d; int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = W->numel() / groups; ScalingParamType<T> alpha = 1.0f; ScalingParamType<T> beta = 0.0f; // NOTE(zhiqiu): inplace addto is not supportted in double grad yet. // ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 
1.0f : // 0.0f; // VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto"); auto wkspace_handle = dev_ctx.cudnn_workspace_handle(); if (ddO) { if (ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::miopenConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx, args1.wdesc.desc(), w, args1.cdesc.desc(), fwd_algo1, &beta, args1.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx + i * group_offset_in, args1.wdesc.desc(), w + i * group_offset_filter, args1.cdesc.desc(), fwd_algo1, workspace_ptr, workspace_size, &beta, args1.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (ddW) { #ifdef PADDLE_WITH_HIP // MIOPEN ONLY support beta to be 0.0f wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::miopenConvolutionForward( handle, &alpha, args2.idesc.desc(), x, args2.wdesc.desc(), ddw, args2.cdesc.desc(), fwd_algo2, &beta, args2.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionForward( handle, &alpha, args2.idesc.desc(), x + i * group_offset_in, args2.wdesc.desc(), ddw + i * group_offset_filter, args2.cdesc.desc(), fwd_algo2, workspace_ptr, workspace_size, &alpha, args2.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (channel_last) { TransToChannelLast<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_ddO_channel, ddO); } } T* transformed_dy_channel = transformed_dO_channel.data<T>(); if (dW && ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::miopenConvolutionBackwardWeights( handle, &alpha, args3.odesc.desc(), transformed_dy_channel, args3.idesc.desc(), ddx, args3.cdesc.desc(), filter_algo, &beta, args3.wdesc.desc(), dw, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, args3.idesc.desc(), ddx + i * group_offset_in, args3.odesc.desc(), transformed_dy_channel + i * group_offset_out, args3.cdesc.desc(), filter_algo, workspace_ptr, workspace_size, &beta, args3.wdesc.desc(), dw + i * group_offset_filter)); }, workspace_size); } #endif } if (dX && ddW) { ddw = ddW->data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::miopenConvolutionBackwardData( handle, &alpha, args4.odesc.desc(), transformed_dy_channel, args4.wdesc.desc(), ddw, args4.cdesc.desc(), data_algo, &beta, args4.idesc.desc(), transformed_dx, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, args4.wdesc.desc(), ddw + i * group_offset_filter, args4.odesc.desc(), 
transformed_dy_channel + i * group_offset_out, args4.cdesc.desc(), data_algo, workspace_ptr, workspace_size, &beta, args4.idesc.desc(), transformed_dx + i * group_offset_in)); }, workspace_size); } #endif if (!is_sys_pad) { // reverse padded input std::vector<int> starts(X->dims().size(), 0); std::vector<int> axes(X->dims().size(), 0); for (size_t i = 0; i < X->dims().size(); ++i) { starts[i] = input_pad[2 * i]; axes[i] = i; } if (X->dims().size() == 4) { RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } else { RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } } if (channel_last) { TransToChannelLast<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_dX_channel, dX); } } } }; } // namespace operators } // namespace paddle namespace plat = paddle::platform; #ifdef PADDLE_WITH_HIP // MIOPEN do not support double REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvOpKernel<float>, paddle::operators::CUDNNConvOpKernel<plat::float16>); REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvGradOpKernel<float>, paddle::operators::CUDNNConvGradOpKernel<plat::float16>); REGISTER_OP_KERNEL( conv2d_grad_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvDoubleGradOpKernel<float>, paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>); // ROCM has limit thread in depthwise_conv.cu and willl result in accuracy issue // Use depthwise_conv2d in MIOPEN to resolve this issue REGISTER_OP_KERNEL(depthwise_conv2d, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvOpKernel<float>, paddle::operators::CUDNNConvOpKernel<plat::float16>); REGISTER_OP_KERNEL(depthwise_conv2d_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvGradOpKernel<float>, paddle::operators::CUDNNConvGradOpKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL( depthwise_conv2d_grad_grad, paddle::operators::CUDNNConvDoubleGradOpKernel<float>, paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>); REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvOpKernel<float>, paddle::operators::CUDNNConvOpKernel<plat::float16>); REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvGradOpKernel<float>); REGISTER_OP_KERNEL( conv3d_grad_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvDoubleGradOpKernel<float>, paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>); #else REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvOpKernel<float>, paddle::operators::CUDNNConvOpKernel<double>, paddle::operators::CUDNNConvOpKernel<plat::float16>); REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvGradOpKernel<float>, paddle::operators::CUDNNConvGradOpKernel<double>, paddle::operators::CUDNNConvGradOpKernel<plat::float16>); REGISTER_OP_KERNEL( conv2d_grad_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvDoubleGradOpKernel<float>, paddle::operators::CUDNNConvDoubleGradOpKernel<double>, paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL( depthwise_conv2d_grad_grad, paddle::operators::CUDNNConvDoubleGradOpKernel<float>, paddle::operators::CUDNNConvDoubleGradOpKernel<double>, paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>); REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvOpKernel<float>, 
paddle::operators::CUDNNConvOpKernel<double>,
                   paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace,
                   paddle::operators::CUDNNConvGradOpKernel<float>,
                   paddle::operators::CUDNNConvGradOpKernel<double>);
REGISTER_OP_KERNEL(
    conv3d_grad_grad, CUDNN, plat::CUDAPlace,
    paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
    paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
    paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
#endif
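The next record pairs a hipified source file with its CUDA original (a GPU Haar-cascade face detector). The substitutions it illustrates are the usual mechanical runtime-API renames performed by hipify. As a minimal sketch of that mapping — not taken verbatim from either file, with the helper name timeKernelMs and the ignored error codes being illustrative assumptions only — the event-timing idiom both versions use corresponds one-to-one like this:

#include <cuda_runtime.h>

// CUDA event timing as it appears in the .cu cell below; the .hip cell replaces
// each cuda* call with the hip* call named in the trailing comment.
// (Hypothetical helper; error codes are ignored here for brevity.)
static float timeKernelMs(void (*launch)(void)) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);                 // hipEventCreate
  cudaEventCreate(&stop);                  // hipEventCreate
  cudaEventRecord(start, 0);               // hipEventRecord
  launch();                                // kernel<<<grid, block>>>()  vs  hipLaunchKernelGGL(kernel, grid, block, 0, 0, ...)
  cudaEventRecord(stop, 0);                // hipEventRecord
  cudaEventSynchronize(stop);              // hipEventSynchronize
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);  // hipEventElapsedTime
  cudaEventDestroy(start);                 // hipEventDestroy
  cudaEventDestroy(stop);                  // hipEventDestroy
  return ms;
}

The banner comment "This is a file automatically generated by hipify" in the .hip cell marks exactly this kind of one-to-one rename; the surrounding program logic is left unchanged.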
7ebebbe849f1565e5170a5904634cf17b16b8d42.hip
// !!! This is a file automatically generated by hipify!!! #include "GPU_Face_Detect.cuh" #include "hip/hip_runtime.h" #include "lock.h" //====================================================================================================== // Declare GPU face detection engine variables //===================================================================================================== // CUDA performance timers hipEvent_t start, stop; // Device memory haar cascade GPUHaarCascade h_gpuHaarCascade; GPUHaarCascade dev_gpuHaarCascade; // Declare pointers for GPU texture memory hipArray * dev_sumArray = NULL; hipArray * dev_sqSumArray = NULL; // Arrays for copying detected faces results from GPU to be post-processed by CPU GPURect *detectedFaces, *dev_detectedFaces; size_t detectedFacesSize; // Initalize device memory for GPU face detection processing void initGPU(GPUHaarCascade &gpuCascade, IplImage * image, CvMat *sumImg, CvMat *sqSumImg) { int width = image->width; int height = image->height; //====================================================================================================== // Define & Init CUDA even timing to determine performance //===================================================================================================== HANDLE_ERROR( hipEventCreate( &start ) ); HANDLE_ERROR( hipEventCreate( &stop ) ); //====================================================================================================== // Define GPU Haar Cascade structures & convert CvHaarCascade to them //===================================================================================================== // Load gpu haar cascade into host copy h_gpuHaarCascade.load(&gpuCascade); // Allocate device memory allocateGPUCascade( h_gpuHaarCascade, dev_gpuHaarCascade); //================================================================== // Generate integral images & copy them to device texture memory //================================================================== // Convert double precision sqsum image to float precision cv::Mat sqSumMat(sqSumImg->rows, sqSumImg->cols, CV_64FC1, sqSumImg->data.fl); //cv::Mat sqSumMat(sqSumImg); sqSumMat.convertTo(sqSumMat, CV_32FC1); CvMat float_sqsm = sqSumMat; // Allocate texture memory for integral images & copy host results from OpenCV(cvIntegral()) to device allocateIntegralImagesGPU(sumImg, &float_sqsm, dev_sumArray, dev_sqSumArray); //=============================================================================== // Allocate & copy face array data to device memory for storing detection results //============================================================================== // Allocate memory on the CPU detectedFacesSize = width * height * sizeof(GPURect); detectedFaces = (GPURect *)malloc(detectedFacesSize); memset(detectedFaces, 0, detectedFacesSize); // Allocate memory on the GPU & copy host data HANDLE_ERROR( hipMalloc( (void**)&dev_detectedFaces, detectedFacesSize ) ); HANDLE_ERROR( hipMemcpy(dev_detectedFaces, detectedFaces, detectedFacesSize, hipMemcpyHostToDevice)); } // From array gpuFaces, check each CvRect.width to determine if the GPU determined this window as a valid face int selectFaces(std::vector<CvRect> &faces, GPURect *gpuFaces, int pixels) { int faces_detected = 0; for( int i = 0; i < pixels; i++ ) { // extract the detected rectanlges only GPURect face_rect = gpuFaces[i]; //CvRect face_rect = gpuFaces[i]; if(face_rect.width != 0) { CvRect convertRect(face_rect.x, face_rect.y, face_rect.width, face_rect.height); 
faces.push_back(convertRect); faces_detected++; } } return faces_detected; } void startCUDA_EventTming() { HANDLE_ERROR( hipEventRecord( start, 0 ) ); } float stopCUDA_EventTiming() { HANDLE_ERROR( hipEventRecord( stop, 0 ) ); HANDLE_ERROR( hipEventSynchronize( stop ) ); float elapsedTime; HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) ); return elapsedTime; } //=============================================================================== // Run v1 kernel for parrelized face detection //============================================================================== std::vector<CvRect> runGPUHaarDetection(std::vector<double> scale, int minNeighbors, FaceDetectionKernel kernelSelect) { printf("****Beginning GPU(Kernel %d) Haar Detection****\n\n", kernelSelect + 1); std::vector<CvRect> faces; float totalElapsedTime = 0.0f; for(int i = 0; i < scale.size(); i++) { // Modify all the features for the new scale h_gpuHaarCascade.setFeaturesForScale(scale[i]); // Copy new scaled values over to device size_t GPU_Classifier_Size = h_gpuHaarCascade.totalNumOfClassifiers * sizeof(GPUHaarClassifier); HANDLE_ERROR( hipMemcpy(dev_gpuHaarCascade.scaled_haar_classifiers, h_gpuHaarCascade.scaled_haar_classifiers, GPU_Classifier_Size, hipMemcpyHostToDevice)); dev_gpuHaarCascade.scale = h_gpuHaarCascade.scale; dev_gpuHaarCascade.real_window_size = h_gpuHaarCascade.real_window_size; dev_gpuHaarCascade.img_detection_size = h_gpuHaarCascade.img_detection_size; int w = dev_gpuHaarCascade.img_detection_size.width; int h = dev_gpuHaarCascade.img_detection_size.height; // Based on input selection, launch appropriate kernel float elapsedTime; switch(kernelSelect) { case V1: elapsedTime = launchKernel_v1(w, h); break; case V2: elapsedTime = launchKernel_v2(w, h); break; case V3: elapsedTime = launchKernel_v3(w, h); break; case V4: elapsedTime = launchKernel_v4(w, h); break; } totalElapsedTime += elapsedTime; // Copy results from device & process them on CPU HANDLE_ERROR( hipMemcpy(detectedFaces, dev_detectedFaces, detectedFacesSize, hipMemcpyDeviceToHost)); // Scan detectedFaces array from GPU for valid detected faces int faces_detected = selectFaces(faces, detectedFaces, w * h); // Output performance information for this stage printf("Stage: %d // Faces Detected: %d // GPU Time: %3.1f ms \n", i, faces_detected, elapsedTime); } // Output final performance printf("\nTotal compute time: %3.1f ms \n\n", totalElapsedTime); // Group detected faces for cleaner results if( minNeighbors != 0) { groupRectangles(faces, minNeighbors, GROUP_EPS); } // Clean up detected faces arrays for future processing memset(detectedFaces, 0, detectedFacesSize); HANDLE_ERROR( hipMemcpy(dev_detectedFaces, detectedFaces, detectedFacesSize, hipMemcpyHostToDevice)); return faces; } float launchKernel_v1(int width, int height) { // Define number of blocks and threads to divide work dim3 blocks(width/16, height/16); dim3 threads(16, 16); // Begin CUDA timing performance startCUDA_EventTming(); // Call kerenel on GPU to run haarcascade for every detection window in image hipLaunchKernelGGL(( haarDetection_v1), dim3(blocks), dim3(threads), 0, 0, dev_gpuHaarCascade, dev_detectedFaces); // Stop CUDA timing performance return stopCUDA_EventTiming(); } float launchKernel_v2(int width, int height) { // Define number of blocks and threads to divide work dim3 blocks(width/16, height/16); dim3 threads(16, 16); // Begin CUDA timing performance startCUDA_EventTming(); // Call kerenel on GPU to run haarcascade for every detection window in image 
hipLaunchKernelGGL(( haarDetection_v2), dim3(blocks), dim3(threads), 0, 0, dev_gpuHaarCascade, dev_detectedFaces); // Stop CUDA timing performance return stopCUDA_EventTiming(); } float launchKernel_v3(int width, int height) { // do num of image pixels divide by 256 divided by 16??? // Define number of blocks and threads to divide work dim3 blocks(32); dim3 threads(8, 8); // Begin CUDA timing performance startCUDA_EventTming(); // Call kerenel on GPU to run haarcascade for every detection window in image hipLaunchKernelGGL(( haarDetection_v3), dim3(blocks), dim3(threads), 0, 0, dev_gpuHaarCascade, dev_detectedFaces); // Stop CUDA timing performance return stopCUDA_EventTiming(); } float launchKernel_v4(int width, int height) { int size = sqrt((float)THREADS_PER_BLOCK_V4); // Define number of blocks and threads to divide work dim3 blocks(width/size, height/size); dim3 threads(size, size); // For this kernel, need to intialize lock for every block // Future implementaiton would try to use different means of mutex locking int numOfLocks = blocks.x * blocks.y; Lock *h_locks = (Lock *)malloc(numOfLocks * sizeof(Lock)); // Init mutex variable in Locks for(int i = 0; i < numOfLocks; i++) h_locks[i].init(); Lock * dev_locks; HANDLE_ERROR( hipMalloc( (void**)&dev_locks, numOfLocks * sizeof(Lock) ) ); HANDLE_ERROR( hipMemcpy(dev_locks, h_locks, numOfLocks * sizeof(Lock), hipMemcpyHostToDevice)); // Begin CUDA timing performance startCUDA_EventTming(); // Call kerenel on GPU to run haarcascade for every detection window in image hipLaunchKernelGGL(( haarDetection_v4), dim3(blocks), dim3(threads), 0, 0, dev_gpuHaarCascade, dev_detectedFaces, dev_locks); // Stop CUDA timing performance float time = stopCUDA_EventTiming(); // Free host & device memory from locks free(h_locks); HANDLE_ERROR(hipFree(dev_locks)); return time; } // Unbind Textures & free host and device memory void shutDownGPU() { releaseTextures(); HANDLE_ERROR( hipEventDestroy( start ) ); HANDLE_ERROR( hipEventDestroy( stop ) ); h_gpuHaarCascade.shutdown(); HANDLE_ERROR( hipFree(dev_gpuHaarCascade.haar_classifiers)); HANDLE_ERROR( hipFree(dev_gpuHaarCascade.scaled_haar_classifiers)); free(detectedFaces); HANDLE_ERROR( hipFree(dev_detectedFaces)); HANDLE_ERROR( hipFreeArray(dev_sumArray)); HANDLE_ERROR( hipFreeArray(dev_sqSumArray)); HANDLE_ERROR( hipDeviceReset()); } /* void runGPUHaarDetection(GPUHaarCascade &h_gpuHaarCascade, IplImage * image, CvMat *sumImg, CvMat *sqSumImg, std::vector<double> scale) { printf("****Beginning GPU Haar Detection****\n\n"); int width = image->width; int height = image->height; //====================================================================================================== // Define & Init CUDA even timing to determine performance //===================================================================================================== hipEvent_t start, stop; HANDLE_ERROR( hipEventCreate( &start ) ); HANDLE_ERROR( hipEventCreate( &stop ) ); //====================================================================================================== // Define GPU Haar Cascade structures & convert CvHaarCascade to them //===================================================================================================== GPUHaarCascade dev_gpuHaarCascade; dev_gpuHaarCascade.img_window_size.width = width; dev_gpuHaarCascade.img_window_size.height = height; // Allocate device memory allocateGPUCascade( h_gpuHaarCascade, dev_gpuHaarCascade); //================================================================== // 
Generate integral images & copy them to device texture memory //================================================================== // Declare pointers for GPU texture memory hipArray * dev_sumArray = NULL; hipArray * dev_sqSumArray = NULL; // Convert double precision sqsum image to float precision cv::Mat sqSumMat(sqSumImg); sqSumMat.convertTo(sqSumMat, CV_32FC1); CvMat float_sqsm = sqSumMat; // Allocate texture memory for integral images & copy host results from OpenCV(cvIntegral()) to device allocateIntegralImagesGPU(sumImg, &float_sqsm, dev_sumArray, dev_sqSumArray); //=============================================================================== // Allocate & copy face array data to device memory for storing detection results //============================================================================== // Declare host & device variables CvRect *detectedFaces, *dev_detectedFaces; // Allocate memory on the CPU size_t faceSize = width * height * sizeof(CvRect); detectedFaces = (CvRect *)malloc(faceSize); memset(detectedFaces, 0, faceSize); // Allocate memory on the GPU HANDLE_ERROR( hipMalloc( (void**)&dev_detectedFaces, faceSize ) ); HANDLE_ERROR( hipMemcpy(dev_detectedFaces, detectedFaces, faceSize, hipMemcpyHostToDevice)); //=============================================================================== // Run kernel for parrelized face detection //============================================================================== Lock locks[5000]; for(int i = 0; i < 5000; i++) locks[i].init(); Lock * dev_locks; HANDLE_ERROR( hipMalloc( (void**)&dev_locks, 5000 * sizeof(Lock) ) ); HANDLE_ERROR( hipMemcpy(dev_locks, locks, 5000 * sizeof(Lock), hipMemcpyHostToDevice)); std::vector<CvRect> faces; float totalElapsedTime = 0.0f; for(int i = 0; i < scale.size(); i++) { dev_gpuHaarCascade.scale = scale[i]; dev_gpuHaarCascade.real_window_size.width = cvRound(dev_gpuHaarCascade.orig_window_size.width * dev_gpuHaarCascade.scale); dev_gpuHaarCascade.real_window_size.height = cvRound(dev_gpuHaarCascade.orig_window_size.height * dev_gpuHaarCascade.scale); int w = cvRound(dev_gpuHaarCascade.img_window_size.width - dev_gpuHaarCascade.real_window_size.width); int h = cvRound(dev_gpuHaarCascade.img_window_size.height - dev_gpuHaarCascade.real_window_size.height); dev_gpuHaarCascade.img_detection_size.width = w; dev_gpuHaarCascade.img_detection_size.height = h; dim3 blocks(w/16, h/16); dim3 threads(16, 16); // v4 //dim3 blocks(w/8, h/8); //dim3 threads(8, 8); //dim3 blocks(32); //dim3 threads(8, 8); // RUN GPU HAAR DETECT HANDLE_ERROR( hipEventRecord( start, 0 ) ); // Call kerenel on GPU to run haarcascade for every detection window in image haarDetection<<<blocks, threads>>>(dev_gpuHaarCascade, dev_detectedFaces); //haarDetection4<<<blocks, threads>>>(dev_gpuHaarCascade, dev_detectedFaces, dev_locks); //haarDetection3<<<64, 32>>>(dev_gpuHaarCascade, dev_detectedFaces); //haarDetection2<<<blocks, threads>>>(dev_gpuHaarCascade, dev_detectedFaces); HANDLE_ERROR( hipEventRecord( stop, 0 ) ); HANDLE_ERROR( hipEventSynchronize( stop ) ); float elapsedTime; HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) ); totalElapsedTime += elapsedTime; // Copy results from device & process them on CPU HANDLE_ERROR( hipMemcpy(detectedFaces, dev_detectedFaces, faceSize, hipMemcpyDeviceToHost)); int faces_detected = 0; for( int i = 0; i < w * h; i++ ) { // extract the detected rectanlges only CvRect face_rect = detectedFaces[i]; if(face_rect.width != 0) { faces.push_back(face_rect); faces_detected++; } } 
printf("Stage: %d // Faces Detected: %d // GPU Time: %3.1f ms \n", i, faces_detected, elapsedTime); } //=============================================================================== // Stop CUDA timing events and output performance results //============================================================================== printf( "Time to compute: %3.1f ms\n", totalElapsedTime ); //=============================================================================== // Draw detected face results //============================================================================== // Group overlapping window const float GROUP_EPS = 0.4f; int minNeighbors = 3; if( minNeighbors != 0) { groupRectangles(faces, minNeighbors, GROUP_EPS); } for(int i = 0; i < faces.size(); i++) { CvRect face_rect = faces[i]; cvRectangle( image, cvPoint(face_rect.x, face_rect.y), cvPoint((face_rect.x + face_rect.width), (face_rect.y + face_rect.height)), CV_RGB(255,0,0), 3 ); } cvNamedWindow( "test", 0 ); cvShowImage( "test", image ); cvWaitKey(0); //=============================================================================== // Release all allocated memory //============================================================================== hipUnbindTexture(sumImageRef); hipUnbindTexture(sqSumImageRef); HANDLE_ERROR( hipEventDestroy( start ) ); HANDLE_ERROR( hipEventDestroy( stop ) ); free(detectedFaces); HANDLE_ERROR( hipFree(dev_detectedFaces)); //HANDLE_ERROR( hipFree(dev_gpuHaarCascade.haar_stage_classifiers)); HANDLE_ERROR( hipFree(dev_gpuhaarCascade.scaled_haar_classifiers)); HANDLE_ERROR( hipFreeArray(dev_sumArray)); HANDLE_ERROR( hipFreeArray(dev_sqSumArray)); HANDLE_ERROR( hipDeviceReset()); }*/
7ebebbe849f1565e5170a5904634cf17b16b8d42.cu
#include "GPU_Face_Detect.cuh" #include "cuda.h" #include "lock.h" //====================================================================================================== // Declare GPU face detection engine variables //===================================================================================================== // CUDA performance timers cudaEvent_t start, stop; // Device memory haar cascade GPUHaarCascade h_gpuHaarCascade; GPUHaarCascade dev_gpuHaarCascade; // Declare pointers for GPU texture memory cudaArray * dev_sumArray = NULL; cudaArray * dev_sqSumArray = NULL; // Arrays for copying detected faces results from GPU to be post-processed by CPU GPURect *detectedFaces, *dev_detectedFaces; size_t detectedFacesSize; // Initalize device memory for GPU face detection processing void initGPU(GPUHaarCascade &gpuCascade, IplImage * image, CvMat *sumImg, CvMat *sqSumImg) { int width = image->width; int height = image->height; //====================================================================================================== // Define & Init CUDA even timing to determine performance //===================================================================================================== HANDLE_ERROR( cudaEventCreate( &start ) ); HANDLE_ERROR( cudaEventCreate( &stop ) ); //====================================================================================================== // Define GPU Haar Cascade structures & convert CvHaarCascade to them //===================================================================================================== // Load gpu haar cascade into host copy h_gpuHaarCascade.load(&gpuCascade); // Allocate device memory allocateGPUCascade( h_gpuHaarCascade, dev_gpuHaarCascade); //================================================================== // Generate integral images & copy them to device texture memory //================================================================== // Convert double precision sqsum image to float precision cv::Mat sqSumMat(sqSumImg->rows, sqSumImg->cols, CV_64FC1, sqSumImg->data.fl); //cv::Mat sqSumMat(sqSumImg); sqSumMat.convertTo(sqSumMat, CV_32FC1); CvMat float_sqsm = sqSumMat; // Allocate texture memory for integral images & copy host results from OpenCV(cvIntegral()) to device allocateIntegralImagesGPU(sumImg, &float_sqsm, dev_sumArray, dev_sqSumArray); //=============================================================================== // Allocate & copy face array data to device memory for storing detection results //============================================================================== // Allocate memory on the CPU detectedFacesSize = width * height * sizeof(GPURect); detectedFaces = (GPURect *)malloc(detectedFacesSize); memset(detectedFaces, 0, detectedFacesSize); // Allocate memory on the GPU & copy host data HANDLE_ERROR( cudaMalloc( (void**)&dev_detectedFaces, detectedFacesSize ) ); HANDLE_ERROR( cudaMemcpy(dev_detectedFaces, detectedFaces, detectedFacesSize, cudaMemcpyHostToDevice)); } // From array gpuFaces, check each CvRect.width to determine if the GPU determined this window as a valid face int selectFaces(std::vector<CvRect> &faces, GPURect *gpuFaces, int pixels) { int faces_detected = 0; for( int i = 0; i < pixels; i++ ) { // extract the detected rectanlges only GPURect face_rect = gpuFaces[i]; //CvRect face_rect = gpuFaces[i]; if(face_rect.width != 0) { CvRect convertRect(face_rect.x, face_rect.y, face_rect.width, face_rect.height); faces.push_back(convertRect); faces_detected++; } } return faces_detected; 
} void startCUDA_EventTming() { HANDLE_ERROR( cudaEventRecord( start, 0 ) ); } float stopCUDA_EventTiming() { HANDLE_ERROR( cudaEventRecord( stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( stop ) ); float elapsedTime; HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) ); return elapsedTime; } //=============================================================================== // Run v1 kernel for parrelized face detection //============================================================================== std::vector<CvRect> runGPUHaarDetection(std::vector<double> scale, int minNeighbors, FaceDetectionKernel kernelSelect) { printf("****Beginning GPU(Kernel %d) Haar Detection****\n\n", kernelSelect + 1); std::vector<CvRect> faces; float totalElapsedTime = 0.0f; for(int i = 0; i < scale.size(); i++) { // Modify all the features for the new scale h_gpuHaarCascade.setFeaturesForScale(scale[i]); // Copy new scaled values over to device size_t GPU_Classifier_Size = h_gpuHaarCascade.totalNumOfClassifiers * sizeof(GPUHaarClassifier); HANDLE_ERROR( cudaMemcpy(dev_gpuHaarCascade.scaled_haar_classifiers, h_gpuHaarCascade.scaled_haar_classifiers, GPU_Classifier_Size, cudaMemcpyHostToDevice)); dev_gpuHaarCascade.scale = h_gpuHaarCascade.scale; dev_gpuHaarCascade.real_window_size = h_gpuHaarCascade.real_window_size; dev_gpuHaarCascade.img_detection_size = h_gpuHaarCascade.img_detection_size; int w = dev_gpuHaarCascade.img_detection_size.width; int h = dev_gpuHaarCascade.img_detection_size.height; // Based on input selection, launch appropriate kernel float elapsedTime; switch(kernelSelect) { case V1: elapsedTime = launchKernel_v1(w, h); break; case V2: elapsedTime = launchKernel_v2(w, h); break; case V3: elapsedTime = launchKernel_v3(w, h); break; case V4: elapsedTime = launchKernel_v4(w, h); break; } totalElapsedTime += elapsedTime; // Copy results from device & process them on CPU HANDLE_ERROR( cudaMemcpy(detectedFaces, dev_detectedFaces, detectedFacesSize, cudaMemcpyDeviceToHost)); // Scan detectedFaces array from GPU for valid detected faces int faces_detected = selectFaces(faces, detectedFaces, w * h); // Output performance information for this stage printf("Stage: %d // Faces Detected: %d // GPU Time: %3.1f ms \n", i, faces_detected, elapsedTime); } // Output final performance printf("\nTotal compute time: %3.1f ms \n\n", totalElapsedTime); // Group detected faces for cleaner results if( minNeighbors != 0) { groupRectangles(faces, minNeighbors, GROUP_EPS); } // Clean up detected faces arrays for future processing memset(detectedFaces, 0, detectedFacesSize); HANDLE_ERROR( cudaMemcpy(dev_detectedFaces, detectedFaces, detectedFacesSize, cudaMemcpyHostToDevice)); return faces; } float launchKernel_v1(int width, int height) { // Define number of blocks and threads to divide work dim3 blocks(width/16, height/16); dim3 threads(16, 16); // Begin CUDA timing performance startCUDA_EventTming(); // Call kerenel on GPU to run haarcascade for every detection window in image haarDetection_v1<<<blocks, threads>>>(dev_gpuHaarCascade, dev_detectedFaces); // Stop CUDA timing performance return stopCUDA_EventTiming(); } float launchKernel_v2(int width, int height) { // Define number of blocks and threads to divide work dim3 blocks(width/16, height/16); dim3 threads(16, 16); // Begin CUDA timing performance startCUDA_EventTming(); // Call kerenel on GPU to run haarcascade for every detection window in image haarDetection_v2<<<blocks, threads>>>(dev_gpuHaarCascade, dev_detectedFaces); // Stop CUDA timing 
performance return stopCUDA_EventTiming(); } float launchKernel_v3(int width, int height) { // do num of image pixels divide by 256 divided by 16??? // Define number of blocks and threads to divide work dim3 blocks(32); dim3 threads(8, 8); // Begin CUDA timing performance startCUDA_EventTming(); // Call kerenel on GPU to run haarcascade for every detection window in image haarDetection_v3<<<blocks, threads>>>(dev_gpuHaarCascade, dev_detectedFaces); // Stop CUDA timing performance return stopCUDA_EventTiming(); } float launchKernel_v4(int width, int height) { int size = sqrt((float)THREADS_PER_BLOCK_V4); // Define number of blocks and threads to divide work dim3 blocks(width/size, height/size); dim3 threads(size, size); // For this kernel, need to intialize lock for every block // Future implementaiton would try to use different means of mutex locking int numOfLocks = blocks.x * blocks.y; Lock *h_locks = (Lock *)malloc(numOfLocks * sizeof(Lock)); // Init mutex variable in Locks for(int i = 0; i < numOfLocks; i++) h_locks[i].init(); Lock * dev_locks; HANDLE_ERROR( cudaMalloc( (void**)&dev_locks, numOfLocks * sizeof(Lock) ) ); HANDLE_ERROR( cudaMemcpy(dev_locks, h_locks, numOfLocks * sizeof(Lock), cudaMemcpyHostToDevice)); // Begin CUDA timing performance startCUDA_EventTming(); // Call kerenel on GPU to run haarcascade for every detection window in image haarDetection_v4<<<blocks, threads>>>(dev_gpuHaarCascade, dev_detectedFaces, dev_locks); // Stop CUDA timing performance float time = stopCUDA_EventTiming(); // Free host & device memory from locks free(h_locks); HANDLE_ERROR(cudaFree(dev_locks)); return time; } // Unbind Textures & free host and device memory void shutDownGPU() { releaseTextures(); HANDLE_ERROR( cudaEventDestroy( start ) ); HANDLE_ERROR( cudaEventDestroy( stop ) ); h_gpuHaarCascade.shutdown(); HANDLE_ERROR( cudaFree(dev_gpuHaarCascade.haar_classifiers)); HANDLE_ERROR( cudaFree(dev_gpuHaarCascade.scaled_haar_classifiers)); free(detectedFaces); HANDLE_ERROR( cudaFree(dev_detectedFaces)); HANDLE_ERROR( cudaFreeArray(dev_sumArray)); HANDLE_ERROR( cudaFreeArray(dev_sqSumArray)); HANDLE_ERROR( cudaDeviceReset()); } /* void runGPUHaarDetection(GPUHaarCascade &h_gpuHaarCascade, IplImage * image, CvMat *sumImg, CvMat *sqSumImg, std::vector<double> scale) { printf("****Beginning GPU Haar Detection****\n\n"); int width = image->width; int height = image->height; //====================================================================================================== // Define & Init CUDA even timing to determine performance //===================================================================================================== cudaEvent_t start, stop; HANDLE_ERROR( cudaEventCreate( &start ) ); HANDLE_ERROR( cudaEventCreate( &stop ) ); //====================================================================================================== // Define GPU Haar Cascade structures & convert CvHaarCascade to them //===================================================================================================== GPUHaarCascade dev_gpuHaarCascade; dev_gpuHaarCascade.img_window_size.width = width; dev_gpuHaarCascade.img_window_size.height = height; // Allocate device memory allocateGPUCascade( h_gpuHaarCascade, dev_gpuHaarCascade); //================================================================== // Generate integral images & copy them to device texture memory //================================================================== // Declare pointers for GPU texture memory cudaArray * 
dev_sumArray = NULL; cudaArray * dev_sqSumArray = NULL; // Convert double precision sqsum image to float precision cv::Mat sqSumMat(sqSumImg); sqSumMat.convertTo(sqSumMat, CV_32FC1); CvMat float_sqsm = sqSumMat; // Allocate texture memory for integral images & copy host results from OpenCV(cvIntegral()) to device allocateIntegralImagesGPU(sumImg, &float_sqsm, dev_sumArray, dev_sqSumArray); //=============================================================================== // Allocate & copy face array data to device memory for storing detection results //============================================================================== // Declare host & device variables CvRect *detectedFaces, *dev_detectedFaces; // Allocate memory on the CPU size_t faceSize = width * height * sizeof(CvRect); detectedFaces = (CvRect *)malloc(faceSize); memset(detectedFaces, 0, faceSize); // Allocate memory on the GPU HANDLE_ERROR( cudaMalloc( (void**)&dev_detectedFaces, faceSize ) ); HANDLE_ERROR( cudaMemcpy(dev_detectedFaces, detectedFaces, faceSize, cudaMemcpyHostToDevice)); //=============================================================================== // Run kernel for parrelized face detection //============================================================================== Lock locks[5000]; for(int i = 0; i < 5000; i++) locks[i].init(); Lock * dev_locks; HANDLE_ERROR( cudaMalloc( (void**)&dev_locks, 5000 * sizeof(Lock) ) ); HANDLE_ERROR( cudaMemcpy(dev_locks, locks, 5000 * sizeof(Lock), cudaMemcpyHostToDevice)); std::vector<CvRect> faces; float totalElapsedTime = 0.0f; for(int i = 0; i < scale.size(); i++) { dev_gpuHaarCascade.scale = scale[i]; dev_gpuHaarCascade.real_window_size.width = cvRound(dev_gpuHaarCascade.orig_window_size.width * dev_gpuHaarCascade.scale); dev_gpuHaarCascade.real_window_size.height = cvRound(dev_gpuHaarCascade.orig_window_size.height * dev_gpuHaarCascade.scale); int w = cvRound(dev_gpuHaarCascade.img_window_size.width - dev_gpuHaarCascade.real_window_size.width); int h = cvRound(dev_gpuHaarCascade.img_window_size.height - dev_gpuHaarCascade.real_window_size.height); dev_gpuHaarCascade.img_detection_size.width = w; dev_gpuHaarCascade.img_detection_size.height = h; dim3 blocks(w/16, h/16); dim3 threads(16, 16); // v4 //dim3 blocks(w/8, h/8); //dim3 threads(8, 8); //dim3 blocks(32); //dim3 threads(8, 8); // RUN GPU HAAR DETECT HANDLE_ERROR( cudaEventRecord( start, 0 ) ); // Call kerenel on GPU to run haarcascade for every detection window in image haarDetection<<<blocks, threads>>>(dev_gpuHaarCascade, dev_detectedFaces); //haarDetection4<<<blocks, threads>>>(dev_gpuHaarCascade, dev_detectedFaces, dev_locks); //haarDetection3<<<64, 32>>>(dev_gpuHaarCascade, dev_detectedFaces); //haarDetection2<<<blocks, threads>>>(dev_gpuHaarCascade, dev_detectedFaces); HANDLE_ERROR( cudaEventRecord( stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( stop ) ); float elapsedTime; HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) ); totalElapsedTime += elapsedTime; // Copy results from device & process them on CPU HANDLE_ERROR( cudaMemcpy(detectedFaces, dev_detectedFaces, faceSize, cudaMemcpyDeviceToHost)); int faces_detected = 0; for( int i = 0; i < w * h; i++ ) { // extract the detected rectanlges only CvRect face_rect = detectedFaces[i]; if(face_rect.width != 0) { faces.push_back(face_rect); faces_detected++; } } printf("Stage: %d // Faces Detected: %d // GPU Time: %3.1f ms \n", i, faces_detected, elapsedTime); } 
//=============================================================================== // Stop CUDA timing events and output performance results //============================================================================== printf( "Time to compute: %3.1f ms\n", totalElapsedTime ); //=============================================================================== // Draw detected face results //============================================================================== // Group overlapping window const float GROUP_EPS = 0.4f; int minNeighbors = 3; if( minNeighbors != 0) { groupRectangles(faces, minNeighbors, GROUP_EPS); } for(int i = 0; i < faces.size(); i++) { CvRect face_rect = faces[i]; cvRectangle( image, cvPoint(face_rect.x, face_rect.y), cvPoint((face_rect.x + face_rect.width), (face_rect.y + face_rect.height)), CV_RGB(255,0,0), 3 ); } cvNamedWindow( "test", 0 ); cvShowImage( "test", image ); cvWaitKey(0); //=============================================================================== // Release all allocated memory //============================================================================== cudaUnbindTexture(sumImageRef); cudaUnbindTexture(sqSumImageRef); HANDLE_ERROR( cudaEventDestroy( start ) ); HANDLE_ERROR( cudaEventDestroy( stop ) ); free(detectedFaces); HANDLE_ERROR( cudaFree(dev_detectedFaces)); //HANDLE_ERROR( cudaFree(dev_gpuHaarCascade.haar_stage_classifiers)); HANDLE_ERROR( cudaFree(dev_gpuhaarCascade.scaled_haar_classifiers)); HANDLE_ERROR( cudaFreeArray(dev_sumArray)); HANDLE_ERROR( cudaFreeArray(dev_sqSumArray)); HANDLE_ERROR( cudaDeviceReset()); }*/
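A note on the launch configurations above: launchKernel_v1 and launchKernel_v2 size their grids with integer division (dim3 blocks(width/16, height/16)), which drops any remainder and leaves the right and bottom border windows unprocessed whenever the detection size is not a multiple of 16. The sketch below shows a round-up launch under that observation; the helper name launchKernel_v1_ceil is illustrative, while the kernel, timing, and buffer names are the ones defined in the file above.

float launchKernel_v1_ceil(int width, int height)
{
    // 16x16 threads per block, as in launchKernel_v1
    dim3 threads(16, 16);

    // Ceiling division so partial tiles at the right/bottom edges are still covered
    dim3 blocks((width + threads.x - 1) / threads.x,
                (height + threads.y - 1) / threads.y);

    startCUDA_EventTming();

    // haarDetection_v1 would then also need an in-kernel guard (e.g. against
    // img_detection_size) so the surplus threads in partial tiles exit early.
    haarDetection_v1<<<blocks, threads>>>(dev_gpuHaarCascade, dev_detectedFaces);

    return stopCUDA_EventTiming();
}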
d8ab16c54648097511c4f29a6423c5064aa0d670.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> #include <stdlib.h> /* srand, rand */ #include <time.h> /* time */ hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); ///////////////////////////////////////////////////////////////////////////////////////////////////////////////BEG __global__ void reduce0(int*g_idata, int*g_odata){ //shared memory for one block of threads extern __shared__ int sdata[]; // each thread loadsone element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i= blockIdx.x*blockDim.x+ threadIdx.x; sdata[tid] = g_idata[i]; __syncthreads(); // do reduction in shared mem for(unsigned int s=1; s < blockDim.x; s *= 2) { if(tid % (2*s) == 0){ sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if(tid == 0) g_odata[blockIdx.x] = sdata[0]; } __global__ void reduce1(int*g_idata, int*g_odata){ extern __shared__ int sdata[]; // each thread loadsone element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i= blockIdx.x*blockDim.x+ threadIdx.x; sdata[tid] = g_idata[i]; __syncthreads(); // do reduction in shared mem for(unsigned int s=1; s < blockDim.x; s *= 2) { int index = 2 * s * tid; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if(tid == 0) g_odata[blockIdx.x] = sdata[0]; } __global__ void reduce2(int*g_idata, int*g_odata){ extern __shared__ int sdata[]; // each thread loadsone element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i= blockIdx.x*blockDim.x+ threadIdx.x; sdata[tid] = g_idata[i]; __syncthreads(); // do reduction in shared mem for(unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if(tid == 0) g_odata[blockIdx.x] = sdata[0]; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////END __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } // Helper function for using CUDA to reduce vectors in parallel. hipError_t reduceWithCuda(int *c, const int *a, unsigned int size){ int *dev_a = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); // (size OR 1) * sizeof(int) if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } //blocks per grid, threads per block. Num of threads = x*y // <<<x, y >>> // Launch a kernel on the GPU with one thread for each element. 
hipLaunchKernelGGL(( reduce0), dim3(1), dim3(size), 0, 0, dev_a, dev_c); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "reduce0 launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); return cudaStatus; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. 
//hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); hipError_t cudaStatus = reduceWithCuda(c, a, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "reduceWithCuda failed!"); return 1; } // Only c[0] holds the block-wide reduction result written by reduce0. printf("reduce0 sum of {1,2,3,4,5} = %d\n", c[0]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } int scan; scanf("%d", &scan); return 0; }
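reduce0, reduce1, and reduce2 above declare their scratch space as extern __shared__ int sdata[], so the dynamic shared-memory size has to be supplied as the third launch parameter; the launch inside reduceWithCuda passes 0, leaving sdata without backing storage. A minimal sketch of a corrected single-block launch follows, assuming the same device buffers; the helper name launchReduce0 is illustrative and not part of the original file.

hipError_t launchReduce0(int *dev_in, int *dev_out, unsigned int n)
{
    // One int of dynamic shared memory per thread, matching sdata[tid] = g_idata[i]
    const size_t sharedBytes = n * sizeof(int);

    // Third macro argument is the dynamic shared-memory size in bytes
    hipLaunchKernelGGL(reduce0, dim3(1), dim3(n), sharedBytes, 0, dev_in, dev_out);

    return hipGetLastError();
}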
d8ab16c54648097511c4f29a6423c5064aa0d670.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <stdlib.h> /* srand, rand */ #include <time.h> /* time */ cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); ///////////////////////////////////////////////////////////////////////////////////////////////////////////////BEG __global__ void reduce0(int*g_idata, int*g_odata){ //shared memory for one block of threads extern __shared__ int sdata[]; // each thread loadsone element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i= blockIdx.x*blockDim.x+ threadIdx.x; sdata[tid] = g_idata[i]; __syncthreads(); // do reduction in shared mem for(unsigned int s=1; s < blockDim.x; s *= 2) { if(tid % (2*s) == 0){ sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if(tid == 0) g_odata[blockIdx.x] = sdata[0]; } __global__ void reduce1(int*g_idata, int*g_odata){ extern __shared__ int sdata[]; // each thread loadsone element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i= blockIdx.x*blockDim.x+ threadIdx.x; sdata[tid] = g_idata[i]; __syncthreads(); // do reduction in shared mem for(unsigned int s=1; s < blockDim.x; s *= 2) { int index = 2 * s * tid; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if(tid == 0) g_odata[blockIdx.x] = sdata[0]; } __global__ void reduce2(int*g_idata, int*g_odata){ extern __shared__ int sdata[]; // each thread loadsone element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i= blockIdx.x*blockDim.x+ threadIdx.x; sdata[tid] = g_idata[i]; __syncthreads(); // do reduction in shared mem for(unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if(tid == 0) g_odata[blockIdx.x] = sdata[0]; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////END __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } // Helper function for using CUDA to reduce vectors in parallel. cudaError_t reduceWithCuda(int *c, const int *a, unsigned int size){ int *dev_a = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); // (size OR 1) * sizeof(int) if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } //blocks per grid, threads per block. Num of threads = x*y // <<<x, y >>> // Launch a kernel on the GPU with one thread for each element. 
reduce0<<<1, size>>>(dev_a, dev_c); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "reduce0 launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); return cudaStatus; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. 
//cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); cudaError_t cudaStatus = reduceWithCuda(c, a, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "reduceWithCuda failed!"); return 1; } // Only c[0] holds the block-wide reduction result written by reduce0. printf("reduce0 sum of {1,2,3,4,5} = %d\n", c[0]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } int scan; scanf("%d", &scan); return 0; }
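reduce1 and reduce2 above are defined but never launched. For inputs larger than one block, the usual pattern is a first pass that leaves one partial sum per block, followed by a second pass over those partials. The sketch below is illustrative rather than taken from the source (the helper name reduceLarge and the 256-thread block size are assumptions), and it presumes n is a power of two of at least 256, since reduce2 as written has no bounds check and its halving loop expects a power-of-two block size.

cudaError_t reduceLarge(int *dev_in, int *dev_partial, unsigned int n)
{
    const unsigned int threads = 256;
    const unsigned int blocks = n / threads;   // exact under the power-of-two assumption

    // Pass 1: each block reduces its 256 elements into dev_partial[blockIdx.x];
    // dev_partial must hold at least `blocks` ints.
    reduce2<<<blocks, threads, threads * sizeof(int)>>>(dev_in, dev_partial);

    // Pass 2: a single block reduces the per-block partial sums in place
    // (assumes blocks <= 1024, the per-block thread limit)
    reduce2<<<1, blocks, blocks * sizeof(int)>>>(dev_partial, dev_partial);

    return cudaGetLastError();
}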
dfbe1025665c945efdb22e955f5946762ec70c06.hip
// !!! This is a file automatically generated by hipify!!! #define GLM_FORCE_CUDA #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 5.0f #define rule2Distance 3.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? // needed for use with thrust thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // DONE-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. glm::vec3* dev_pos_shuffler; glm::vec3* dev_vel_shuffler; // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridSideCount; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to hipFree in Boids::endSimulation. hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_pos failed!"); hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!"); hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects, dev_pos, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance); int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridSideCount = 2 * halfSideCount; gridCellCount = gridSideCount * gridSideCount * gridSideCount; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // DONE-2.1 DONE-2.3 - Allocate additional buffers here. hipMalloc((void **)&dev_particleArrayIndices, numObjects * sizeof(*dev_particleArrayIndices)); checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!"); hipMalloc((void **)&dev_particleGridIndices, numObjects * sizeof(*dev_particleGridIndices)); checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!"); dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices); dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices); hipMalloc((void **)&dev_gridCellStartIndices, gridCellCount * sizeof(*dev_gridCellStartIndices)); checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!"); hipMalloc((void **)&dev_gridCellEndIndices, gridCellCount * sizeof(*dev_gridCellEndIndices)); checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!"); hipMalloc((void**)&dev_pos_shuffler, numObjects * sizeof(*dev_pos_shuffler)); checkCUDAErrorWithLine("hipMalloc dev_pos_shuffler failed!"); hipMalloc((void**)&dev_vel_shuffler, numObjects * sizeof(*dev_vel_shuffler)); checkCUDAErrorWithLine("hipMalloc dev_vel_shuffler failed!"); hipDeviceSynchronize(); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. 
*/ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for call to the kernCopyboidsToVBO CUDA kernel. */ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale); kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); hipDeviceSynchronize(); } /****************** * stepSimulation * ******************/ /** * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce. * __device__ code can be called from a __global__ context * Compute the new velocity on the body with index `iSelf` due to the `N` boids * in the `pos` and `vel` arrays. */ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { glm::vec3 delta_vel(0.0f); glm::vec3 rule1_neighbor_pos_sum(0.0f); float rule1_neighbor_count = 0.0f; glm::vec3 rule2_total_offset(0.0f); glm::vec3 rule3_neighbor_vel_sum(0.0f); float rule3_neighbor_count = 0.0f; float current_distance; glm::vec3 current_pos_offset; for (int i = 0; i < N; i++) { if (i == iSelf){ continue; } current_pos_offset = pos[i] - pos[iSelf]; current_distance = glm::length(current_pos_offset); // Rule 1: Get neighbor position sum and neighbor count for rule1 if (current_distance < rule1Distance) { rule1_neighbor_pos_sum += pos[i]; rule1_neighbor_count += 1.0f; } // Rule 2: Calculate offset for rule 2 if (current_distance < rule2Distance) { rule2_total_offset -= current_pos_offset; } // Rule 3: Get velocity sum and neighbor count for rule 3 if (current_distance < rule3Distance) { rule3_neighbor_vel_sum += vel[i]; rule3_neighbor_count += 1.0f; } } // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (rule1_neighbor_count > 0.0f) { delta_vel += rule1Scale * ((rule1_neighbor_pos_sum / rule1_neighbor_count) - pos[iSelf]); } // Rule 2: boids try to stay a distance d away from each other delta_vel += rule2Scale * rule2_total_offset; // Rule 3: boids try to match the speed of surrounding boids if (rule3_neighbor_count > 0.0f) { delta_vel += rule3Scale * ((rule3_neighbor_vel_sum / rule3_neighbor_count) - vel[iSelf]); } return delta_vel; } /** * DONE-1.2 implement basic flocking * For each of the `N` bodies, update its position based on its current velocity. 
*/ __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // static const float max_sqr_speed = maxSpeed * maxSpeed; auto index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } // Compute a new velocity based on pos and vel1 glm::vec3 new_vel = vel1[index] + computeVelocityChange(N, index, pos, vel1); // Clamp the speed auto speed = glm::length(new_vel); if (speed > maxSpeed) { new_vel = new_vel * (maxSpeed / speed); } // Record the new velocity into vel2. Question: why NOT vel1? // Ans: neighbors need vel from last frame to update their velocity vel2[index] = new_vel; } /** * LOOK-1.2 Since this is pretty trivial, we implemented it for you. * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } // LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index. // LOOK-2.3 Looking at this method, what would be the most memory efficient // order for iterating over neighboring grid cells? // for(x) // for(y) // for(z)? Or some other order? // Ans: I think it is // for(z) // for(y) // for(x) __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { return x + y * gridResolution + z * gridResolution * gridResolution; } // This function returns -1 if any dimension falls out of boundry __device__ int gridIndex3Dto1DWithCheck(int x, int y, int z, int gridResolution) { if (x < 0 || x >= gridResolution || y < 0 || y >= gridResolution || z < 0 || z >= gridResolution) { return -1; } else { return gridIndex3Dto1D(x, y, z, gridResolution); } } __device__ glm::vec3 gridIndex3DFromPosition(const glm::vec3& pos , const glm::vec3& grid_min, float inverse_cell_width) { return (pos - grid_min) * inverse_cell_width; } __device__ int gridIndexFromPosition(const glm::vec3& pos, const glm::vec3& grid_min , float inverse_cell_width, int grid_resolution) { auto gridindex_3d = gridIndex3DFromPosition(pos, grid_min, inverse_cell_width); auto gridindex = gridIndex3Dto1D(static_cast<int>(gridindex_3d.x) , static_cast<int>(gridindex_3d.y) , static_cast<int>(gridindex_3d.z) , grid_resolution); return gridindex; } __global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *indices, int *gridIndices) { // DONE-2.1 // - Label each boid with the index of its grid cell. 
// - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 auto index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } indices[index] = index; gridIndices[index] = gridIndexFromPosition(pos[index], gridMin , inverseCellWidth, gridResolution); } // LOOK-2.1 Consider how this could be useful for indicating that a cell // does not enclose any boids __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { // DONE-2.1 // Identify the start point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!" // index is sorted index, not boid index! auto index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } auto prev_index = (index - 1 + N) % N; // To minimize branches if (particleGridIndices[index] != particleGridIndices[prev_index]) { gridCellStartIndices[particleGridIndices[index]] = index; gridCellEndIndices[particleGridIndices[prev_index]] = prev_index + 1; // intended to make the end side open like most C++ ranges } } // Helper function to determine direction (1 or -1) of the neighbor // grid cell in one dimension __device__ int directionOfNeighborSingleDimension(float axis_index) { if (static_cast<int>(axis_index + 0.5f) > static_cast<int>(axis_index)) { return 1; } else { return -1; } } __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridResolution, glm::vec3 gridMin , float inverseCellWidth, float cellWidth , int *gridCellStartIndices, int *gridCellEndIndices , int *particleArrayIndices, glm::vec3 *pos , glm::vec3 *vel1, glm::vec3 *vel2) { // DONE-2.1 - Update a boid's velocity using the uniform grid to reduce // the number of boids that need to be checked. auto index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } // - Identify the grid cell that this particle is in auto boid_index = particleArrayIndices[index]; auto gridcell_index3d = gridIndex3DFromPosition(pos[boid_index], gridMin , inverseCellWidth); // - Identify which cells may contain neighbors. This isn't always 8. 
auto sign_x = directionOfNeighborSingleDimension(gridcell_index3d.x); auto sign_y = directionOfNeighborSingleDimension(gridcell_index3d.y); auto sign_z = directionOfNeighborSingleDimension(gridcell_index3d.z); auto index_x = static_cast<int>(gridcell_index3d.x); auto index_y = static_cast<int>(gridcell_index3d.y); auto index_z = static_cast<int>(gridcell_index3d.z); int possible_neighbor_indices[8]; possible_neighbor_indices[0] = gridIndex3Dto1DWithCheck(index_x, index_y, index_z , gridResolution); possible_neighbor_indices[1] = gridIndex3Dto1DWithCheck(index_x + sign_x, index_y, index_z , gridResolution); possible_neighbor_indices[2] = gridIndex3Dto1DWithCheck(index_x, index_y + sign_y, index_z , gridResolution); possible_neighbor_indices[3] = gridIndex3Dto1DWithCheck(index_x, index_y, index_z + sign_z , gridResolution); possible_neighbor_indices[4] = gridIndex3Dto1DWithCheck(index_x + sign_x, index_y + sign_y, index_z , gridResolution); possible_neighbor_indices[5] = gridIndex3Dto1DWithCheck(index_x + sign_x, index_y, index_z + sign_z , gridResolution); possible_neighbor_indices[6] = gridIndex3Dto1DWithCheck(index_x, index_y + sign_y, index_z + sign_z , gridResolution); possible_neighbor_indices[7] = gridIndex3Dto1DWithCheck(index_x + sign_x, index_y + sign_y, index_z + sign_z , gridResolution); // - For each cell, read the start/end indices in the boid pointer array. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. glm::vec3 delta_vel(0.0f); glm::vec3 rule1_neighbor_pos_sum(0.0f); float rule1_neighbor_count = 0.0f; glm::vec3 rule2_total_offset(0.0f); glm::vec3 rule3_neighbor_vel_sum(0.0f); float rule3_neighbor_count = 0.0f; float current_distance; glm::vec3 current_pos_offset; int iother; for (const auto neighbor_cell_index : possible_neighbor_indices) { if (neighbor_cell_index == -1) { continue; } for (int i = gridCellStartIndices[neighbor_cell_index] ; i < gridCellEndIndices[neighbor_cell_index] ; i++) { iother = particleArrayIndices[i]; if (iother == boid_index){ continue; } current_pos_offset = pos[iother] - pos[boid_index]; current_distance = glm::length(current_pos_offset); // Rule 1: Get neighbor position sum and neighbor count for rule1 if (current_distance < rule1Distance) { rule1_neighbor_pos_sum += pos[iother]; rule1_neighbor_count += 1.0f; } // Rule 2: Calculate offset for rule 2 if (current_distance < rule2Distance) { rule2_total_offset -= current_pos_offset; } // Rule 3: Get velocity sum and neighbor count for rule 3 if (current_distance < rule3Distance) { rule3_neighbor_vel_sum += vel1[iother]; rule3_neighbor_count += 1.0f; } } } // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (rule1_neighbor_count > 0.0f) { delta_vel += rule1Scale * ((rule1_neighbor_pos_sum / rule1_neighbor_count) - pos[boid_index]); } // Rule 2: boids try to stay a distance d away from each other delta_vel += rule2Scale * rule2_total_offset; // Rule 3: boids try to match the speed of surrounding boids if (rule3_neighbor_count > 0.0f) { delta_vel += rule3Scale * ((rule3_neighbor_vel_sum / rule3_neighbor_count) - vel1[boid_index]); } auto new_vel = vel1[boid_index] + delta_vel; // - Clamp the speed change before putting the new speed in vel2 auto speed = glm::length(new_vel); if (speed > maxSpeed) { new_vel = new_vel * (maxSpeed / speed); } vel2[boid_index] = new_vel; } __global__ void kernReshuffle(int N, glm::vec3* pos , glm::vec3* vel, glm::vec3* pos_shuffler, 
glm::vec3* vel_shuffler , int* particleArrayIndices) { // Reshuffle pos and vel using particleArrayIndices // , store in pos_shuffler and vel_shuffler auto index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } pos_shuffler[index] = pos[particleArrayIndices[index]]; vel_shuffler[index] = vel[particleArrayIndices[index]]; } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // DONE-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered, // except with one less level of indirection. // This should expect gridCellStartIndices and gridCellEndIndices to refer // directly to pos and vel1. auto index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } // - Identify the grid cell that this particle is in auto gridcell_index3d = gridIndex3DFromPosition(pos[index], gridMin , inverseCellWidth); // - Identify which cells may contain neighbors. This isn't always 8. auto sign_x = directionOfNeighborSingleDimension(gridcell_index3d.x); auto sign_y = directionOfNeighborSingleDimension(gridcell_index3d.y); auto sign_z = directionOfNeighborSingleDimension(gridcell_index3d.z); auto index_x = static_cast<int>(gridcell_index3d.x); auto index_y = static_cast<int>(gridcell_index3d.y); auto index_z = static_cast<int>(gridcell_index3d.z); int possible_neighbor_indices[8]; // CHANGE: order of this array changed from 2.1 to match the best for(z): for(y): for(x) order int index_z_smaller = imin(index_z, index_z + sign_z); int index_z_bigger = imax(index_z, index_z + sign_z); int index_y_smaller = imin(index_y, index_y + sign_y); int index_y_bigger = imax(index_y, index_y + sign_y); int index_x_smaller = imin(index_x, index_x + sign_x); int index_x_bigger = imax(index_x, index_x + sign_x); possible_neighbor_indices[0] = gridIndex3Dto1DWithCheck(index_x_smaller, index_y_smaller, index_z_smaller , gridResolution); possible_neighbor_indices[1] = gridIndex3Dto1DWithCheck(index_x_bigger, index_y_smaller, index_z_smaller , gridResolution); possible_neighbor_indices[2] = gridIndex3Dto1DWithCheck(index_x_smaller, index_y_bigger, index_z_smaller , gridResolution); possible_neighbor_indices[3] = gridIndex3Dto1DWithCheck(index_x_bigger, index_y_bigger, index_z_smaller , gridResolution); possible_neighbor_indices[4] = gridIndex3Dto1DWithCheck(index_x_smaller, index_y_smaller, index_z_bigger , gridResolution); possible_neighbor_indices[5] = gridIndex3Dto1DWithCheck(index_x_bigger, index_y_smaller, index_z_bigger , gridResolution); possible_neighbor_indices[6] = gridIndex3Dto1DWithCheck(index_x_smaller, index_y_bigger, index_z_bigger , gridResolution); possible_neighbor_indices[7] = gridIndex3Dto1DWithCheck(index_x_bigger, index_y_bigger, index_z_bigger , gridResolution); // - For each cell, read the start/end indices in the boid pointer array. // DIFFERENCE: For best results, consider what order the cells should be // checked in to maximize the memory benefits of reordering the boids data. 
// // CHANGE: order of the array (possible_neighbor_indices) above already changed // so the order of the cells checked become // for(z): for(y): for(x) glm::vec3 delta_vel(0.0f); glm::vec3 rule1_neighbor_pos_sum(0.0f); float rule1_neighbor_count = 0.0f; glm::vec3 rule2_total_offset(0.0f); glm::vec3 rule3_neighbor_vel_sum(0.0f); float rule3_neighbor_count = 0.0f; float current_distance; glm::vec3 current_pos_offset; for (const auto neighbor_cell_index : possible_neighbor_indices) { if (neighbor_cell_index == -1) { continue; } // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. for (int iother = gridCellStartIndices[neighbor_cell_index] ; iother < gridCellEndIndices[neighbor_cell_index] ; iother++) { if (iother == index){ continue; } current_pos_offset = pos[iother] - pos[index]; current_distance = glm::length(current_pos_offset); // Rule 1: Get neighbor position sum and neighbor count for rule1 if (current_distance < rule1Distance) { rule1_neighbor_pos_sum += pos[iother]; rule1_neighbor_count += 1.0f; } // Rule 2: Calculate offset for rule 2 if (current_distance < rule2Distance) { rule2_total_offset -= current_pos_offset; } // Rule 3: Get velocity sum and neighbor count for rule 3 if (current_distance < rule3Distance) { rule3_neighbor_vel_sum += vel1[iother]; rule3_neighbor_count += 1.0f; } } } // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (rule1_neighbor_count > 0.0f) { delta_vel += rule1Scale * ((rule1_neighbor_pos_sum / rule1_neighbor_count) - pos[index]); } // Rule 2: boids try to stay a distance d away from each other delta_vel += rule2Scale * rule2_total_offset; // Rule 3: boids try to match the speed of surrounding boids if (rule3_neighbor_count > 0.0f) { delta_vel += rule3Scale * ((rule3_neighbor_vel_sum / rule3_neighbor_count) - vel1[index]); } auto new_vel = vel1[index] + delta_vel; // - Clamp the speed change before putting the new speed in vel2 auto speed = glm::length(new_vel); if (speed > maxSpeed) { new_vel = new_vel * (maxSpeed / speed); } vel2[index] = new_vel; } /** * Step the entire N-body simulation by `dt` seconds. */ void Boids::stepSimulationNaive(float dt) { // DONE-1.2 - use the kernels you wrote to step the simulation forward in time. dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); hipLaunchKernelGGL(( kernUpdateVelocityBruteForce), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2); hipLaunchKernelGGL(( kernUpdatePos) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects, dt, dev_pos, dev_vel1); // DONE-1.2 ping-pong the velocity buffers // swap pointers so current velocity become new velocity std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationScatteredGrid(float dt) { dim3 fullBlocksPerGridForBoids((numObjects + blockSize - 1) / blockSize); dim3 fullBlocksPerGridForCells((gridCellCount + blockSize - 1) / blockSize); // DONE-2.1 // Uniform Grid Neighbor search using Thrust sort. // In Parallel: // - label each particle with its array index as well as its grid index. // Use 2x width grids. kernComputeIndices << <fullBlocksPerGridForBoids, blockSize >> >(numObjects, gridSideCount , gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. 
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects , dev_thrust_particleArrayIndices); // reset start and end buffer since not all values are set during kernIdentifyCellStartEnd hipLaunchKernelGGL(( kernResetIntBuffer) , dim3(fullBlocksPerGridForCells), dim3(blockSize) , 0, 0, gridCellCount , dev_gridCellStartIndices, -1); hipLaunchKernelGGL(( kernResetIntBuffer) , dim3(fullBlocksPerGridForCells), dim3(blockSize) , 0, 0, gridCellCount , dev_gridCellEndIndices, -1); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices hipLaunchKernelGGL(( kernIdentifyCellStartEnd) , dim3(fullBlocksPerGridForBoids), dim3(blockSize) , 0, 0, numObjects , dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); // - Perform velocity updates using neighbor search hipLaunchKernelGGL(( kernUpdateVelNeighborSearchScattered) , dim3(fullBlocksPerGridForBoids), dim3(blockSize) , 0, 0, numObjects , gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices , dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2); // - Update positions hipLaunchKernelGGL(( kernUpdatePos) , dim3(fullBlocksPerGridForBoids), dim3(blockSize) , 0, 0, numObjects, dt, dev_pos, dev_vel1); // - Ping-pong buffers as needed std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationCoherentGrid(float dt) { dim3 fullBlocksPerGridForBoids((numObjects + blockSize - 1) / blockSize); dim3 fullBlocksPerGridForCells((gridCellCount + blockSize - 1) / blockSize); // DONE-2.3 - start by copying Boids::stepSimulationNaiveGrid // Uniform Grid Neighbor search using Thrust sort on cell-coherent data. // In Parallel: // - Label each particle with its array index as well as its grid index. // Use 2x width grids hipLaunchKernelGGL(( kernComputeIndices) , dim3(fullBlocksPerGridForBoids), dim3(blockSize) , 0, 0, numObjects, gridSideCount , gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects , dev_thrust_particleArrayIndices); // reset start and end buffer since not all values are set during kernIdentifyCellStartEnd hipLaunchKernelGGL(( kernResetIntBuffer) , dim3(fullBlocksPerGridForCells), dim3(blockSize) , 0, 0, gridCellCount , dev_gridCellStartIndices, -1); hipLaunchKernelGGL(( kernResetIntBuffer) , dim3(fullBlocksPerGridForCells), dim3(blockSize) , 0, 0, gridCellCount , dev_gridCellEndIndices, -1); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices hipLaunchKernelGGL(( kernIdentifyCellStartEnd) , dim3(fullBlocksPerGridForBoids), dim3(blockSize) , 0, 0, numObjects , dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); // - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all // the particle data in the simulation array. 
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED hipLaunchKernelGGL(( kernReshuffle) , dim3(fullBlocksPerGridForBoids), dim3(blockSize) , 0, 0, numObjects, dev_pos , dev_vel1, dev_pos_shuffler, dev_vel_shuffler, dev_particleArrayIndices); std::swap(dev_pos, dev_pos_shuffler); std::swap(dev_vel1, dev_vel_shuffler); // - Perform velocity updates using neighbor search hipLaunchKernelGGL(( kernUpdateVelNeighborSearchCoherent) , dim3(fullBlocksPerGridForBoids), dim3(blockSize) , 0, 0, numObjects , gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices , dev_gridCellEndIndices, dev_pos, dev_vel1, dev_vel2); // - Update positions hipLaunchKernelGGL(( kernUpdatePos) , dim3(fullBlocksPerGridForBoids), dim3(blockSize) , 0, 0, numObjects, dt, dev_pos, dev_vel1); // - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE. std::swap(dev_vel1, dev_vel2); } void Boids::endSimulation() { hipFree(dev_vel1); hipFree(dev_vel2); hipFree(dev_pos); // DONE-2.1 DONE-2.3 - Free any additional buffers here. hipFree(dev_particleArrayIndices); hipFree(dev_particleGridIndices); hipFree(dev_gridCellStartIndices); hipFree(dev_gridCellEndIndices); hipFree(dev_pos_shuffler); hipFree(dev_vel_shuffler); } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. // test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; int *intKeys = new int[N]; int *intValues = new int[N]; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; hipMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!"); hipMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice); hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. thrust::device_ptr<int> dev_thrust_keys(dev_intKeys); thrust::device_ptr<int> dev_thrust_values(dev_intValues); // LOOK-2.1 Example for using thrust::sort_by_key thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values); // How to copy data back to the CPU side from the GPU hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost); hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost); checkCUDAErrorWithLine("memcpy back failed!"); std::cout << "after unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // cleanup delete[] intKeys; delete[] intValues; hipFree(dev_intKeys); hipFree(dev_intValues); checkCUDAErrorWithLine("hipFree failed!"); return; }
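kernIdentifyCellStartEnd above describes itself as a parallel unrolling of a sequential scan over the sorted grid indices. A host-side reference of that scan makes the invariant explicit: after thrust::sort_by_key, each cell owns a half-open range [start, end) into the boid arrays, and cells without boids keep the -1 sentinel written by kernResetIntBuffer. The function below is an illustrative sketch (its name and std::vector signature are not from the source); it handles the first and last elements with explicit checks rather than the kernel's modular-wrap trick.

#include <vector>

void identifyCellStartEndHost(const std::vector<int> &sortedGridIndices,
                              std::vector<int> &cellStart,  // sized gridCellCount, pre-filled with -1
                              std::vector<int> &cellEnd)    // sized gridCellCount, pre-filled with -1
{
    const int n = static_cast<int>(sortedGridIndices.size());
    for (int i = 0; i < n; ++i) {
        const int cell = sortedGridIndices[i];
        if (i == 0 || cell != sortedGridIndices[i - 1]) {
            cellStart[cell] = i;       // first sorted position belonging to this cell
        }
        if (i == n - 1 || cell != sortedGridIndices[i + 1]) {
            cellEnd[cell] = i + 1;     // open end, matching the kernel's convention
        }
    }
}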
dfbe1025665c945efdb22e955f5946762ec70c06.cu
#define GLM_FORCE_CUDA #include <stdio.h> #include <cuda.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 5.0f #define rule2Distance 3.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? // needed for use with thrust thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // DONE-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. glm::vec3* dev_pos_shuffler; glm::vec3* dev_vel_shuffler; // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridSideCount; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to cudaFree in Boids::endSimulation. cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_pos failed!"); cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!"); cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects, dev_pos, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance); int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridSideCount = 2 * halfSideCount; gridCellCount = gridSideCount * gridSideCount * gridSideCount; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // DONE-2.1 DONE-2.3 - Allocate additional buffers here. cudaMalloc((void **)&dev_particleArrayIndices, numObjects * sizeof(*dev_particleArrayIndices)); checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!"); cudaMalloc((void **)&dev_particleGridIndices, numObjects * sizeof(*dev_particleGridIndices)); checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!"); dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices); dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices); cudaMalloc((void **)&dev_gridCellStartIndices, gridCellCount * sizeof(*dev_gridCellStartIndices)); checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!"); cudaMalloc((void **)&dev_gridCellEndIndices, gridCellCount * sizeof(*dev_gridCellEndIndices)); checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!"); cudaMalloc((void**)&dev_pos_shuffler, numObjects * sizeof(*dev_pos_shuffler)); checkCUDAErrorWithLine("cudaMalloc dev_pos_shuffler failed!"); cudaMalloc((void**)&dev_vel_shuffler, numObjects * sizeof(*dev_vel_shuffler)); checkCUDAErrorWithLine("cudaMalloc dev_vel_shuffler failed!"); cudaThreadSynchronize(); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. 
*/ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for call to the kernCopyboidsToVBO CUDA kernel. */ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale); kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); cudaThreadSynchronize(); } /****************** * stepSimulation * ******************/ /** * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce. * __device__ code can be called from a __global__ context * Compute the new velocity on the body with index `iSelf` due to the `N` boids * in the `pos` and `vel` arrays. */ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { glm::vec3 delta_vel(0.0f); glm::vec3 rule1_neighbor_pos_sum(0.0f); float rule1_neighbor_count = 0.0f; glm::vec3 rule2_total_offset(0.0f); glm::vec3 rule3_neighbor_vel_sum(0.0f); float rule3_neighbor_count = 0.0f; float current_distance; glm::vec3 current_pos_offset; for (int i = 0; i < N; i++) { if (i == iSelf){ continue; } current_pos_offset = pos[i] - pos[iSelf]; current_distance = glm::length(current_pos_offset); // Rule 1: Get neighbor position sum and neighbor count for rule1 if (current_distance < rule1Distance) { rule1_neighbor_pos_sum += pos[i]; rule1_neighbor_count += 1.0f; } // Rule 2: Calculate offset for rule 2 if (current_distance < rule2Distance) { rule2_total_offset -= current_pos_offset; } // Rule 3: Get velocity sum and neighbor count for rule 3 if (current_distance < rule3Distance) { rule3_neighbor_vel_sum += vel[i]; rule3_neighbor_count += 1.0f; } } // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (rule1_neighbor_count > 0.0f) { delta_vel += rule1Scale * ((rule1_neighbor_pos_sum / rule1_neighbor_count) - pos[iSelf]); } // Rule 2: boids try to stay a distance d away from each other delta_vel += rule2Scale * rule2_total_offset; // Rule 3: boids try to match the speed of surrounding boids if (rule3_neighbor_count > 0.0f) { delta_vel += rule3Scale * ((rule3_neighbor_vel_sum / rule3_neighbor_count) - vel[iSelf]); } return delta_vel; } /** * DONE-1.2 implement basic flocking * For each of the `N` bodies, update its position based on its current velocity. 
*/ __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // static const float max_sqr_speed = maxSpeed * maxSpeed; auto index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } // Compute a new velocity based on pos and vel1 glm::vec3 new_vel = vel1[index] + computeVelocityChange(N, index, pos, vel1); // Clamp the speed auto speed = glm::length(new_vel); if (speed > maxSpeed) { new_vel = new_vel * (maxSpeed / speed); } // Record the new velocity into vel2. Question: why NOT vel1? // Ans: neighbors need vel from last frame to update their velocity vel2[index] = new_vel; } /** * LOOK-1.2 Since this is pretty trivial, we implemented it for you. * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } // LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index. // LOOK-2.3 Looking at this method, what would be the most memory efficient // order for iterating over neighboring grid cells? // for(x) // for(y) // for(z)? Or some other order? // Ans: I think it is // for(z) // for(y) // for(x) __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { return x + y * gridResolution + z * gridResolution * gridResolution; } // This function returns -1 if any dimension falls out of boundry __device__ int gridIndex3Dto1DWithCheck(int x, int y, int z, int gridResolution) { if (x < 0 || x >= gridResolution || y < 0 || y >= gridResolution || z < 0 || z >= gridResolution) { return -1; } else { return gridIndex3Dto1D(x, y, z, gridResolution); } } __device__ glm::vec3 gridIndex3DFromPosition(const glm::vec3& pos , const glm::vec3& grid_min, float inverse_cell_width) { return (pos - grid_min) * inverse_cell_width; } __device__ int gridIndexFromPosition(const glm::vec3& pos, const glm::vec3& grid_min , float inverse_cell_width, int grid_resolution) { auto gridindex_3d = gridIndex3DFromPosition(pos, grid_min, inverse_cell_width); auto gridindex = gridIndex3Dto1D(static_cast<int>(gridindex_3d.x) , static_cast<int>(gridindex_3d.y) , static_cast<int>(gridindex_3d.z) , grid_resolution); return gridindex; } __global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *indices, int *gridIndices) { // DONE-2.1 // - Label each boid with the index of its grid cell. 
// - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 auto index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } indices[index] = index; gridIndices[index] = gridIndexFromPosition(pos[index], gridMin , inverseCellWidth, gridResolution); } // LOOK-2.1 Consider how this could be useful for indicating that a cell // does not enclose any boids __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { // DONE-2.1 // Identify the start point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!" // index is sorted index, not boid index! auto index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } auto prev_index = (index - 1 + N) % N; // To minimize branches if (particleGridIndices[index] != particleGridIndices[prev_index]) { gridCellStartIndices[particleGridIndices[index]] = index; gridCellEndIndices[particleGridIndices[prev_index]] = prev_index + 1; // intended to make the end side open like most C++ ranges } } // Helper function to determine direction (1 or -1) of the neighbor // grid cell in one dimension __device__ int directionOfNeighborSingleDimension(float axis_index) { if (static_cast<int>(axis_index + 0.5f) > static_cast<int>(axis_index)) { return 1; } else { return -1; } } __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridResolution, glm::vec3 gridMin , float inverseCellWidth, float cellWidth , int *gridCellStartIndices, int *gridCellEndIndices , int *particleArrayIndices, glm::vec3 *pos , glm::vec3 *vel1, glm::vec3 *vel2) { // DONE-2.1 - Update a boid's velocity using the uniform grid to reduce // the number of boids that need to be checked. auto index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } // - Identify the grid cell that this particle is in auto boid_index = particleArrayIndices[index]; auto gridcell_index3d = gridIndex3DFromPosition(pos[boid_index], gridMin , inverseCellWidth); // - Identify which cells may contain neighbors. This isn't always 8. 
auto sign_x = directionOfNeighborSingleDimension(gridcell_index3d.x); auto sign_y = directionOfNeighborSingleDimension(gridcell_index3d.y); auto sign_z = directionOfNeighborSingleDimension(gridcell_index3d.z); auto index_x = static_cast<int>(gridcell_index3d.x); auto index_y = static_cast<int>(gridcell_index3d.y); auto index_z = static_cast<int>(gridcell_index3d.z); int possible_neighbor_indices[8]; possible_neighbor_indices[0] = gridIndex3Dto1DWithCheck(index_x, index_y, index_z , gridResolution); possible_neighbor_indices[1] = gridIndex3Dto1DWithCheck(index_x + sign_x, index_y, index_z , gridResolution); possible_neighbor_indices[2] = gridIndex3Dto1DWithCheck(index_x, index_y + sign_y, index_z , gridResolution); possible_neighbor_indices[3] = gridIndex3Dto1DWithCheck(index_x, index_y, index_z + sign_z , gridResolution); possible_neighbor_indices[4] = gridIndex3Dto1DWithCheck(index_x + sign_x, index_y + sign_y, index_z , gridResolution); possible_neighbor_indices[5] = gridIndex3Dto1DWithCheck(index_x + sign_x, index_y, index_z + sign_z , gridResolution); possible_neighbor_indices[6] = gridIndex3Dto1DWithCheck(index_x, index_y + sign_y, index_z + sign_z , gridResolution); possible_neighbor_indices[7] = gridIndex3Dto1DWithCheck(index_x + sign_x, index_y + sign_y, index_z + sign_z , gridResolution); // - For each cell, read the start/end indices in the boid pointer array. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. glm::vec3 delta_vel(0.0f); glm::vec3 rule1_neighbor_pos_sum(0.0f); float rule1_neighbor_count = 0.0f; glm::vec3 rule2_total_offset(0.0f); glm::vec3 rule3_neighbor_vel_sum(0.0f); float rule3_neighbor_count = 0.0f; float current_distance; glm::vec3 current_pos_offset; int iother; for (const auto neighbor_cell_index : possible_neighbor_indices) { if (neighbor_cell_index == -1) { continue; } for (int i = gridCellStartIndices[neighbor_cell_index] ; i < gridCellEndIndices[neighbor_cell_index] ; i++) { iother = particleArrayIndices[i]; if (iother == boid_index){ continue; } current_pos_offset = pos[iother] - pos[boid_index]; current_distance = glm::length(current_pos_offset); // Rule 1: Get neighbor position sum and neighbor count for rule1 if (current_distance < rule1Distance) { rule1_neighbor_pos_sum += pos[iother]; rule1_neighbor_count += 1.0f; } // Rule 2: Calculate offset for rule 2 if (current_distance < rule2Distance) { rule2_total_offset -= current_pos_offset; } // Rule 3: Get velocity sum and neighbor count for rule 3 if (current_distance < rule3Distance) { rule3_neighbor_vel_sum += vel1[iother]; rule3_neighbor_count += 1.0f; } } } // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (rule1_neighbor_count > 0.0f) { delta_vel += rule1Scale * ((rule1_neighbor_pos_sum / rule1_neighbor_count) - pos[boid_index]); } // Rule 2: boids try to stay a distance d away from each other delta_vel += rule2Scale * rule2_total_offset; // Rule 3: boids try to match the speed of surrounding boids if (rule3_neighbor_count > 0.0f) { delta_vel += rule3Scale * ((rule3_neighbor_vel_sum / rule3_neighbor_count) - vel1[boid_index]); } auto new_vel = vel1[boid_index] + delta_vel; // - Clamp the speed change before putting the new speed in vel2 auto speed = glm::length(new_vel); if (speed > maxSpeed) { new_vel = new_vel * (maxSpeed / speed); } vel2[boid_index] = new_vel; } __global__ void kernReshuffle(int N, glm::vec3* pos , glm::vec3* vel, glm::vec3* pos_shuffler, 
glm::vec3* vel_shuffler , int* particleArrayIndices) { // Reshuffle pos and vel using particleArrayIndices // , store in pos_shuffler and vel_shuffler auto index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } pos_shuffler[index] = pos[particleArrayIndices[index]]; vel_shuffler[index] = vel[particleArrayIndices[index]]; } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // DONE-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered, // except with one less level of indirection. // This should expect gridCellStartIndices and gridCellEndIndices to refer // directly to pos and vel1. auto index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } // - Identify the grid cell that this particle is in auto gridcell_index3d = gridIndex3DFromPosition(pos[index], gridMin , inverseCellWidth); // - Identify which cells may contain neighbors. This isn't always 8. auto sign_x = directionOfNeighborSingleDimension(gridcell_index3d.x); auto sign_y = directionOfNeighborSingleDimension(gridcell_index3d.y); auto sign_z = directionOfNeighborSingleDimension(gridcell_index3d.z); auto index_x = static_cast<int>(gridcell_index3d.x); auto index_y = static_cast<int>(gridcell_index3d.y); auto index_z = static_cast<int>(gridcell_index3d.z); int possible_neighbor_indices[8]; // CHANGE: order of this array changed from 2.1 to match the best for(z): for(y): for(x) order int index_z_smaller = imin(index_z, index_z + sign_z); int index_z_bigger = imax(index_z, index_z + sign_z); int index_y_smaller = imin(index_y, index_y + sign_y); int index_y_bigger = imax(index_y, index_y + sign_y); int index_x_smaller = imin(index_x, index_x + sign_x); int index_x_bigger = imax(index_x, index_x + sign_x); possible_neighbor_indices[0] = gridIndex3Dto1DWithCheck(index_x_smaller, index_y_smaller, index_z_smaller , gridResolution); possible_neighbor_indices[1] = gridIndex3Dto1DWithCheck(index_x_bigger, index_y_smaller, index_z_smaller , gridResolution); possible_neighbor_indices[2] = gridIndex3Dto1DWithCheck(index_x_smaller, index_y_bigger, index_z_smaller , gridResolution); possible_neighbor_indices[3] = gridIndex3Dto1DWithCheck(index_x_bigger, index_y_bigger, index_z_smaller , gridResolution); possible_neighbor_indices[4] = gridIndex3Dto1DWithCheck(index_x_smaller, index_y_smaller, index_z_bigger , gridResolution); possible_neighbor_indices[5] = gridIndex3Dto1DWithCheck(index_x_bigger, index_y_smaller, index_z_bigger , gridResolution); possible_neighbor_indices[6] = gridIndex3Dto1DWithCheck(index_x_smaller, index_y_bigger, index_z_bigger , gridResolution); possible_neighbor_indices[7] = gridIndex3Dto1DWithCheck(index_x_bigger, index_y_bigger, index_z_bigger , gridResolution); // - For each cell, read the start/end indices in the boid pointer array. // DIFFERENCE: For best results, consider what order the cells should be // checked in to maximize the memory benefits of reordering the boids data. 
// // CHANGE: order of the array (possible_neighbor_indices) above already changed // so the order of the cells checked become // for(z): for(y): for(x) glm::vec3 delta_vel(0.0f); glm::vec3 rule1_neighbor_pos_sum(0.0f); float rule1_neighbor_count = 0.0f; glm::vec3 rule2_total_offset(0.0f); glm::vec3 rule3_neighbor_vel_sum(0.0f); float rule3_neighbor_count = 0.0f; float current_distance; glm::vec3 current_pos_offset; for (const auto neighbor_cell_index : possible_neighbor_indices) { if (neighbor_cell_index == -1) { continue; } // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. for (int iother = gridCellStartIndices[neighbor_cell_index] ; iother < gridCellEndIndices[neighbor_cell_index] ; iother++) { if (iother == index){ continue; } current_pos_offset = pos[iother] - pos[index]; current_distance = glm::length(current_pos_offset); // Rule 1: Get neighbor position sum and neighbor count for rule1 if (current_distance < rule1Distance) { rule1_neighbor_pos_sum += pos[iother]; rule1_neighbor_count += 1.0f; } // Rule 2: Calculate offset for rule 2 if (current_distance < rule2Distance) { rule2_total_offset -= current_pos_offset; } // Rule 3: Get velocity sum and neighbor count for rule 3 if (current_distance < rule3Distance) { rule3_neighbor_vel_sum += vel1[iother]; rule3_neighbor_count += 1.0f; } } } // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (rule1_neighbor_count > 0.0f) { delta_vel += rule1Scale * ((rule1_neighbor_pos_sum / rule1_neighbor_count) - pos[index]); } // Rule 2: boids try to stay a distance d away from each other delta_vel += rule2Scale * rule2_total_offset; // Rule 3: boids try to match the speed of surrounding boids if (rule3_neighbor_count > 0.0f) { delta_vel += rule3Scale * ((rule3_neighbor_vel_sum / rule3_neighbor_count) - vel1[index]); } auto new_vel = vel1[index] + delta_vel; // - Clamp the speed change before putting the new speed in vel2 auto speed = glm::length(new_vel); if (speed > maxSpeed) { new_vel = new_vel * (maxSpeed / speed); } vel2[index] = new_vel; } /** * Step the entire N-body simulation by `dt` seconds. */ void Boids::stepSimulationNaive(float dt) { // DONE-1.2 - use the kernels you wrote to step the simulation forward in time. dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernUpdateVelocityBruteForce<<<fullBlocksPerGrid, blockSize >>>(numObjects, dev_pos, dev_vel1, dev_vel2); kernUpdatePos <<<fullBlocksPerGrid, blockSize >>>(numObjects, dt, dev_pos, dev_vel1); // DONE-1.2 ping-pong the velocity buffers // swap pointers so current velocity become new velocity std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationScatteredGrid(float dt) { dim3 fullBlocksPerGridForBoids((numObjects + blockSize - 1) / blockSize); dim3 fullBlocksPerGridForCells((gridCellCount + blockSize - 1) / blockSize); // DONE-2.1 // Uniform Grid Neighbor search using Thrust sort. // In Parallel: // - label each particle with its array index as well as its grid index. // Use 2x width grids. kernComputeIndices << <fullBlocksPerGridForBoids, blockSize >> >(numObjects, gridSideCount , gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. 
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects , dev_thrust_particleArrayIndices); // reset start and end buffer since not all values are set during kernIdentifyCellStartEnd kernResetIntBuffer <<<fullBlocksPerGridForCells, blockSize >>>(gridCellCount , dev_gridCellStartIndices, -1); kernResetIntBuffer <<<fullBlocksPerGridForCells, blockSize >>>(gridCellCount , dev_gridCellEndIndices, -1); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices kernIdentifyCellStartEnd <<<fullBlocksPerGridForBoids, blockSize >>>(numObjects , dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); // - Perform velocity updates using neighbor search kernUpdateVelNeighborSearchScattered <<<fullBlocksPerGridForBoids, blockSize >>>(numObjects , gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices , dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2); // - Update positions kernUpdatePos <<<fullBlocksPerGridForBoids, blockSize >>>(numObjects, dt, dev_pos, dev_vel1); // - Ping-pong buffers as needed std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationCoherentGrid(float dt) { dim3 fullBlocksPerGridForBoids((numObjects + blockSize - 1) / blockSize); dim3 fullBlocksPerGridForCells((gridCellCount + blockSize - 1) / blockSize); // DONE-2.3 - start by copying Boids::stepSimulationNaiveGrid // Uniform Grid Neighbor search using Thrust sort on cell-coherent data. // In Parallel: // - Label each particle with its array index as well as its grid index. // Use 2x width grids kernComputeIndices <<<fullBlocksPerGridForBoids, blockSize >>>(numObjects, gridSideCount , gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects , dev_thrust_particleArrayIndices); // reset start and end buffer since not all values are set during kernIdentifyCellStartEnd kernResetIntBuffer <<<fullBlocksPerGridForCells, blockSize >>>(gridCellCount , dev_gridCellStartIndices, -1); kernResetIntBuffer <<<fullBlocksPerGridForCells, blockSize >>>(gridCellCount , dev_gridCellEndIndices, -1); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices kernIdentifyCellStartEnd <<<fullBlocksPerGridForBoids, blockSize >>>(numObjects , dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); // - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all // the particle data in the simulation array. // CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED kernReshuffle <<<fullBlocksPerGridForBoids, blockSize >>>(numObjects, dev_pos , dev_vel1, dev_pos_shuffler, dev_vel_shuffler, dev_particleArrayIndices); std::swap(dev_pos, dev_pos_shuffler); std::swap(dev_vel1, dev_vel_shuffler); // - Perform velocity updates using neighbor search kernUpdateVelNeighborSearchCoherent <<<fullBlocksPerGridForBoids, blockSize >>>(numObjects , gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices , dev_gridCellEndIndices, dev_pos, dev_vel1, dev_vel2); // - Update positions kernUpdatePos <<<fullBlocksPerGridForBoids, blockSize >>>(numObjects, dt, dev_pos, dev_vel1); // - Ping-pong buffers as needed. 
THIS MAY BE DIFFERENT FROM BEFORE. std::swap(dev_vel1, dev_vel2); } void Boids::endSimulation() { cudaFree(dev_vel1); cudaFree(dev_vel2); cudaFree(dev_pos); // DONE-2.1 DONE-2.3 - Free any additional buffers here. cudaFree(dev_particleArrayIndices); cudaFree(dev_particleGridIndices); cudaFree(dev_gridCellStartIndices); cudaFree(dev_gridCellEndIndices); cudaFree(dev_pos_shuffler); cudaFree(dev_vel_shuffler); } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. // test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; int *intKeys = new int[N]; int *intValues = new int[N]; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; cudaMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!"); cudaMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice); cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. thrust::device_ptr<int> dev_thrust_keys(dev_intKeys); thrust::device_ptr<int> dev_thrust_values(dev_intValues); // LOOK-2.1 Example for using thrust::sort_by_key thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values); // How to copy data back to the CPU side from the GPU cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost); cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost); checkCUDAErrorWithLine("memcpy back failed!"); std::cout << "after unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // cleanup delete[] intKeys; delete[] intValues; cudaFree(dev_intKeys); cudaFree(dev_intValues); checkCUDAErrorWithLine("cudaFree failed!"); return; }
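// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the original file] stepSimulationCoherentGrid
// above sorts boid indices by grid-cell key and then gathers pos/vel into
// cell-coherent buffers before the neighbor search. This is a minimal
// standalone illustration of that sort + gather pattern; kernGather and the
// toy data are invented for the sketch and are not the project's buffers.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>

__global__ void kernGather(int n, const float *src, const int *order, float *dst) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    dst[i] = src[order[i]];  // the same indirection kernReshuffle performs above
  }
}

int main() {
  const int n = 6;
  int cellsHost[n] = {2, 0, 1, 0, 2, 1};              // toy "grid cell" per particle
  thrust::device_vector<int> cells(cellsHost, cellsHost + n);

  thrust::device_vector<int> order(n);
  thrust::sequence(order.begin(), order.end());       // 0, 1, ..., n-1

  thrust::device_vector<float> pos(n);
  thrust::sequence(pos.begin(), pos.end(), 100.0f);   // 100, 101, ...

  // Sort the array indices by cell id, like the thrust::sort_by_key call above.
  thrust::sort_by_key(cells.begin(), cells.end(), order.begin());

  // Gather positions into cell-coherent order.
  thrust::device_vector<float> posCoherent(n);
  kernGather<<<1, 32>>>(n,
                        thrust::raw_pointer_cast(pos.data()),
                        thrust::raw_pointer_cast(order.data()),
                        thrust::raw_pointer_cast(posCoherent.data()));
  cudaDeviceSynchronize();

  thrust::host_vector<float> out = posCoherent;
  for (int i = 0; i < n; i++) { printf("cell-sorted pos[%d] = %f\n", i, (float)out[i]); }
  return 0;
}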
0f0db5e88db0a5bd25d88890e54252d77449dba4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THH.h" #include "THHTensorMath.h" #include "THHGeneral.h" #include "THHBlas.h" #include "THHTensorCopy.h" #include "THHTensorRandom.h" #include "THHHalf.h" #include "THHApply.cuh" #include "THHReduce.cuh" #include "THHDeviceUtils.cuh" #include "THHNumerics.cuh" #include "THHAtomics.cuh" #include "THHThrustAllocator.cuh" #include "THHTensorSort.cuh" #include <thrust/device_ptr.h> #include <thrust/sort.h> #include <algorithm> // for std::min // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexCopyLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexCopySmallIndex(TensorInfo<T, IndexType> dst, TensorInfo<T, IndexType> src, TensorInfo<int64_t, IndexType> indices, int dstCopyDim, int srcCopyDim, IndexType innerSize, int64_t dstCopyDimSize) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) { // Lua indices begin at 1 IndexType dstIndex = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE; assert(dstIndex < dstCopyDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstCopyDim]; IndexType srcOffset = IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcCopyDim]; dst.data[dstOffset] = src.data[srcOffset]; } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexCopySmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexCopyLargeIndex(TensorInfo<T, IndexType> dst, TensorInfo<T, IndexType> src, TensorInfo<int64_t, IndexType> indices, int dstCopyDim, int srcCopyDim, IndexType totalSize, IndexType innerSize, int64_t dstCopyDimSize) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType srcIndex, elementInSlice; if (IndexIsMajor) { srcIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; srcIndex = linearIndex % innerSize; } // Lua indices begin at 1 IndexType dstIndex = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE; assert(dstIndex < dstCopyDimSize); IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstCopyDim]; IndexType srcOffset = IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcCopyDim]; dst.data[dstOffset] = src.data[srcOffset]; } } // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexAddLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexAddSmallIndex(TensorInfo<T, IndexType> dst, TensorInfo<T, IndexType> src, TensorInfo<int64_t, IndexType> indices, int dstAddDim, int srcAddDim, IndexType innerSize, int64_t dstAddDimSize) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) { // Lua indices begin at 1 IndexType dstIndex = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE; assert(dstIndex < dstAddDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstAddDim]; IndexType srcOffset = IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcAddDim]; atomicAdd(&dst.data[dstOffset], src.data[srcOffset]); } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexAddSmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexAddLargeIndex(TensorInfo<T, IndexType> dst, TensorInfo<T, IndexType> src, TensorInfo<int64_t, IndexType> indices, int dstAddDim, int srcAddDim, IndexType totalSize, IndexType innerSize, int64_t dstAddDimSize) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType srcIndex, elementInSlice; if (IndexIsMajor) { srcIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; srcIndex = linearIndex % innerSize; } // Lua indices begin at 1 IndexType dstIndex = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE; assert(dstIndex < dstAddDimSize); IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstAddDim]; IndexType srcOffset = IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcAddDim]; atomicAdd(&dst.data[dstOffset], src.data[srcOffset]); } } // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexFillLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndexType, int DstDim, int IdxDim> __global__ void indexFillSmallIndex(TensorInfo<T, IndexType> dst, TensorInfo<int64_t, IndexType> indices, int dstFillDim, IndexType innerSize, int64_t dstFillDimSize, T val) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) { // Lua indices begin at 1 IndexType dstIndex_ = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE; assert(dstIndex_ < dstFillDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex_ * dst.strides[dstFillDim]; dst.data[dstOffset] = val; } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexFillSmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndexType, int DstDim, int IdxDim, bool IndexIsMajor> __global__ void indexFillLargeIndex(TensorInfo<T, IndexType> dst, TensorInfo<int64_t, IndexType> indices, int dstFillDim, IndexType totalSize, IndexType innerSize, int64_t dstFillDimSize, T val) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstIndex, elementInSlice; if (IndexIsMajor) { dstIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; dstIndex = linearIndex % innerSize; } // Lua indices begin at 1 IndexType dstIndex_ = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE; assert(dstIndex_ < dstFillDimSize); IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex_ * dst.strides[dstFillDim]; dst.data[dstOffset] = val; } } // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexSelectLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexSelectSmallIndex(TensorInfo<T, IndexType> dst, TensorInfo<T, IndexType> src, TensorInfo<int64_t, IndexType> indices, int dstSelectDim, int srcSelectDim, IndexType innerSize, int64_t srcSelectDimSize) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) { // Lua indices begin at 1 IndexType srcIndex = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE; assert(srcIndex < srcSelectDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstSelectDim]; IndexType srcOffset = IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcSelectDim]; dst.data[dstOffset] = src.data[srcOffset]; } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexSelectSmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexSelectLargeIndex(TensorInfo<T, IndexType> dst, TensorInfo<T, IndexType> src, TensorInfo<int64_t, IndexType> indices, int dstSelectDim, int srcSelectDim, IndexType totalSize, IndexType innerSize, int64_t srcSelectDimSize) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstIndex, elementInSlice; if (IndexIsMajor) { dstIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; dstIndex = linearIndex % innerSize; } // Lua indices begin at 1 IndexType srcIndex = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE; assert(srcIndex < srcSelectDimSize); IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstSelectDim]; IndexType srcOffset = IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcSelectDim]; dst.data[dstOffset] = src.data[srcOffset]; } } template <int Dims, typename T, typename IndexType> __device__ __forceinline__ IndexType indexToOffset( const TensorInfo<T, IndexType>& info, int64_t index, IndexType size) { IndexType linearIndex = static_cast<IndexType>(index); assert(linearIndex < size && linearIndex >= -size); if (linearIndex < 0) { linearIndex += size; } return IndexToOffset<T, IndexType, Dims>::get(linearIndex, info) - TH_INDEX_BASE; } struct WrapIndexOp { WrapIndexOp(int64_t size) : size(size) {} __device__ __forceinline__ void operator()(int64_t* out, int64_t* in) { auto idx = *in; assert(idx < size && idx >= -size); *out = idx < 0 ? 
idx + size : idx; } int64_t size; }; template <typename T, typename IndexType, int Dims> struct TensorTakeOp { TensorTakeOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*) : info(info), numel(numel) {} __device__ __forceinline__ void operator()(T* out, int64_t* index) { auto offset = indexToOffset<Dims>(info, *index, numel); *out = info.data[offset]; } const TensorInfo<T, IndexType> info; IndexType numel; }; template <typename T, typename IndexType, int Dims> struct TensorPutOp { TensorPutOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*) : info(info), numel(numel) {} __device__ __forceinline__ void operator()(T* value, int64_t* index) { auto offset = indexToOffset<Dims>(info, *index, numel); info.data[offset] = *value; } const TensorInfo<T, IndexType> info; IndexType numel; }; template <typename T, typename IndexType, int Dims> struct TensorPutAccumulateOp { TensorPutAccumulateOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t* start, int64_t* end) : info(info), numel(numel), start(start), end(end) {} __device__ __forceinline__ void operator()(T* value, int64_t* index) { if (index == start || *index != *(index - 1)) { int64_t linear_index = *index; auto offset = indexToOffset<Dims>(info, linear_index, numel); do { info.data[offset] = THCNumerics<T>::add(info.data[offset], *value); index++; value++; } while (index != end && *index == linear_index); } } const TensorInfo<T, IndexType> info; IndexType numel; int64_t* start; int64_t* end; }; template<typename IndexType, typename real, template<class, class, int> class Op, typename TensorType> void dispatchTakePutImpl(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) { // These are only valid if index is contiguous auto start = THCudaLongTensor_data(state, index); auto end = start + THCudaLongTensor_numel(state, index); auto aInfo = getTensorInfo<TensorType, IndexType>(state, a); aInfo.collapseDims(); auto numel = TensorUtils<TensorType>::getNumElements(state, a); if (aInfo.isContiguous()) { auto op = Op<real, IndexType, -2>(aInfo, numel, start, end); THC_pointwiseApply2(state, b, index, op); } else { auto op = Op<real, IndexType, -1>(aInfo, numel, start, end); THC_pointwiseApply2(state, b, index, op); } } template<typename real, template<class, class, int> class Op, typename TensorType> void dispatchTakePut(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) { if (TensorUtils<TensorType>::canUse32BitIndexMath(state, a, INT_MAX)) { dispatchTakePutImpl<int32_t, real, Op>(state, a, b, index); } else { dispatchTakePutImpl<int64_t, real, Op>(state, a, b, index); } } #include "generic/THCTensorIndex.cu" #include "THHGenerateAllTypes.h"
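// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the original file] WrapIndexOp and
// indexToOffset above accept indices in [-size, size) and wrap negative values
// so they address from the end. This is a tiny host-only illustration of just
// that wrap rule; the function name wrapIndex is invented for the sketch.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>
#include <cstdio>

static int64_t wrapIndex(int64_t idx, int64_t size) {
  assert(idx < size && idx >= -size);   // same precondition the device code asserts
  return idx < 0 ? idx + size : idx;    // -1 -> size-1, -size -> 0
}

int main() {
  const int64_t size = 5;
  const int64_t samples[] = {0, 4, -1, -5};
  for (int64_t s : samples) {
    printf("wrapIndex(%lld, %lld) = %lld\n",
           (long long)s, (long long)size, (long long)wrapIndex(s, size));
  }
  return 0;
}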
0f0db5e88db0a5bd25d88890e54252d77449dba4.cu
#include "THC.h" #include "THCTensorMath.h" #include "THCGeneral.h" #include "THCBlas.h" #include "THCTensorCopy.h" #include "THCTensorRandom.h" #include "THCHalf.h" #include "THCApply.cuh" #include "THCReduce.cuh" #include "THCDeviceUtils.cuh" #include "THCNumerics.cuh" #include "THCAtomics.cuh" #include "THCThrustAllocator.cuh" #include "THCTensorSort.cuh" #include <thrust/device_ptr.h> #include <thrust/sort.h> #include <algorithm> // for std::min // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexCopyLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexCopySmallIndex(TensorInfo<T, IndexType> dst, TensorInfo<T, IndexType> src, TensorInfo<int64_t, IndexType> indices, int dstCopyDim, int srcCopyDim, IndexType innerSize, int64_t dstCopyDimSize) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) { // Lua indices begin at 1 IndexType dstIndex = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE; assert(dstIndex < dstCopyDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstCopyDim]; IndexType srcOffset = IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcCopyDim]; dst.data[dstOffset] = src.data[srcOffset]; } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexCopySmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexCopyLargeIndex(TensorInfo<T, IndexType> dst, TensorInfo<T, IndexType> src, TensorInfo<int64_t, IndexType> indices, int dstCopyDim, int srcCopyDim, IndexType totalSize, IndexType innerSize, int64_t dstCopyDimSize) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType srcIndex, elementInSlice; if (IndexIsMajor) { srcIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; srcIndex = linearIndex % innerSize; } // Lua indices begin at 1 IndexType dstIndex = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE; assert(dstIndex < dstCopyDimSize); IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstCopyDim]; IndexType srcOffset = IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcCopyDim]; dst.data[dstOffset] = src.data[srcOffset]; } } // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexAddLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexAddSmallIndex(TensorInfo<T, IndexType> dst, TensorInfo<T, IndexType> src, TensorInfo<int64_t, IndexType> indices, int dstAddDim, int srcAddDim, IndexType innerSize, int64_t dstAddDimSize) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) { // Lua indices begin at 1 IndexType dstIndex = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE; assert(dstIndex < dstAddDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstAddDim]; IndexType srcOffset = IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcAddDim]; atomicAdd(&dst.data[dstOffset], src.data[srcOffset]); } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexAddSmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexAddLargeIndex(TensorInfo<T, IndexType> dst, TensorInfo<T, IndexType> src, TensorInfo<int64_t, IndexType> indices, int dstAddDim, int srcAddDim, IndexType totalSize, IndexType innerSize, int64_t dstAddDimSize) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType srcIndex, elementInSlice; if (IndexIsMajor) { srcIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; srcIndex = linearIndex % innerSize; } // Lua indices begin at 1 IndexType dstIndex = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE; assert(dstIndex < dstAddDimSize); IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstAddDim]; IndexType srcOffset = IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcAddDim]; atomicAdd(&dst.data[dstOffset], src.data[srcOffset]); } } // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexFillLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndexType, int DstDim, int IdxDim> __global__ void indexFillSmallIndex(TensorInfo<T, IndexType> dst, TensorInfo<int64_t, IndexType> indices, int dstFillDim, IndexType innerSize, int64_t dstFillDimSize, T val) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) { // Lua indices begin at 1 IndexType dstIndex_ = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE; assert(dstIndex_ < dstFillDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex_ * dst.strides[dstFillDim]; dst.data[dstOffset] = val; } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexFillSmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndexType, int DstDim, int IdxDim, bool IndexIsMajor> __global__ void indexFillLargeIndex(TensorInfo<T, IndexType> dst, TensorInfo<int64_t, IndexType> indices, int dstFillDim, IndexType totalSize, IndexType innerSize, int64_t dstFillDimSize, T val) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstIndex, elementInSlice; if (IndexIsMajor) { dstIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; dstIndex = linearIndex % innerSize; } // Lua indices begin at 1 IndexType dstIndex_ = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE; assert(dstIndex_ < dstFillDimSize); IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex_ * dst.strides[dstFillDim]; dst.data[dstOffset] = val; } } // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexSelectLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexSelectSmallIndex(TensorInfo<T, IndexType> dst, TensorInfo<T, IndexType> src, TensorInfo<int64_t, IndexType> indices, int dstSelectDim, int srcSelectDim, IndexType innerSize, int64_t srcSelectDimSize) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) { // Lua indices begin at 1 IndexType srcIndex = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE; assert(srcIndex < srcSelectDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstSelectDim]; IndexType srcOffset = IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcSelectDim]; dst.data[dstOffset] = src.data[srcOffset]; } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexSelectSmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexSelectLargeIndex(TensorInfo<T, IndexType> dst, TensorInfo<T, IndexType> src, TensorInfo<int64_t, IndexType> indices, int dstSelectDim, int srcSelectDim, IndexType totalSize, IndexType innerSize, int64_t srcSelectDimSize) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstIndex, elementInSlice; if (IndexIsMajor) { dstIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; dstIndex = linearIndex % innerSize; } // Lua indices begin at 1 IndexType srcIndex = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE; assert(srcIndex < srcSelectDimSize); IndexType dstOffset = IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstSelectDim]; IndexType srcOffset = IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcSelectDim]; dst.data[dstOffset] = src.data[srcOffset]; } } template <int Dims, typename T, typename IndexType> __device__ __forceinline__ IndexType indexToOffset( const TensorInfo<T, IndexType>& info, int64_t index, IndexType size) { IndexType linearIndex = static_cast<IndexType>(index); assert(linearIndex < size && linearIndex >= -size); if (linearIndex < 0) { linearIndex += size; } return IndexToOffset<T, IndexType, Dims>::get(linearIndex, info) - TH_INDEX_BASE; } struct WrapIndexOp { WrapIndexOp(int64_t size) : size(size) {} __device__ __forceinline__ void operator()(int64_t* out, int64_t* in) { auto idx = *in; assert(idx < size && idx >= -size); *out = idx < 0 ? 
idx + size : idx; } int64_t size; }; template <typename T, typename IndexType, int Dims> struct TensorTakeOp { TensorTakeOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*) : info(info), numel(numel) {} __device__ __forceinline__ void operator()(T* out, int64_t* index) { auto offset = indexToOffset<Dims>(info, *index, numel); *out = info.data[offset]; } const TensorInfo<T, IndexType> info; IndexType numel; }; template <typename T, typename IndexType, int Dims> struct TensorPutOp { TensorPutOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*) : info(info), numel(numel) {} __device__ __forceinline__ void operator()(T* value, int64_t* index) { auto offset = indexToOffset<Dims>(info, *index, numel); info.data[offset] = *value; } const TensorInfo<T, IndexType> info; IndexType numel; }; template <typename T, typename IndexType, int Dims> struct TensorPutAccumulateOp { TensorPutAccumulateOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t* start, int64_t* end) : info(info), numel(numel), start(start), end(end) {} __device__ __forceinline__ void operator()(T* value, int64_t* index) { if (index == start || *index != *(index - 1)) { int64_t linear_index = *index; auto offset = indexToOffset<Dims>(info, linear_index, numel); do { info.data[offset] = THCNumerics<T>::add(info.data[offset], *value); index++; value++; } while (index != end && *index == linear_index); } } const TensorInfo<T, IndexType> info; IndexType numel; int64_t* start; int64_t* end; }; template<typename IndexType, typename real, template<class, class, int> class Op, typename TensorType> void dispatchTakePutImpl(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) { // These are only valid if index is contiguous auto start = THCudaLongTensor_data(state, index); auto end = start + THCudaLongTensor_numel(state, index); auto aInfo = getTensorInfo<TensorType, IndexType>(state, a); aInfo.collapseDims(); auto numel = TensorUtils<TensorType>::getNumElements(state, a); if (aInfo.isContiguous()) { auto op = Op<real, IndexType, -2>(aInfo, numel, start, end); THC_pointwiseApply2(state, b, index, op); } else { auto op = Op<real, IndexType, -1>(aInfo, numel, start, end); THC_pointwiseApply2(state, b, index, op); } } template<typename real, template<class, class, int> class Op, typename TensorType> void dispatchTakePut(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) { if (TensorUtils<TensorType>::canUse32BitIndexMath(state, a, INT_MAX)) { dispatchTakePutImpl<int32_t, real, Op>(state, a, b, index); } else { dispatchTakePutImpl<int64_t, real, Op>(state, a, b, index); } } #include "generic/THCTensorIndex.cu" #include "THCGenerateAllTypes.h"
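The comment blocks above describe when THC prefers the small-index kernels (few selected indices, each index reused across a whole slice) over the large-index kernels (many indices, parallelism spread over every output element). The snippet below is a minimal, self-contained CUDA sketch -- not part of THC -- of the gather pattern that indexSelectLargeIndex implements, restricted to the simplest case: a contiguous row-major matrix, selection along dimension 0, and 0-based indices, so the TensorInfo/IndexToOffset machinery and the TH_INDEX_BASE shift are omitted.

#include <cstdio>
#include <cuda_runtime.h>

// Simplified analogue of indexSelectLargeIndex: one grid-stride loop over all
// output elements of a [numIndices x rowLen] result gathered from a
// [numRows x rowLen] row-major source.
__global__ void rowSelectKernel(float* dst, const float* src,
                                const long* indices,
                                int numIndices, int rowLen) {
  int total = numIndices * rowLen;
  for (int linear = blockIdx.x * blockDim.x + threadIdx.x;
       linear < total;
       linear += gridDim.x * blockDim.x) {
    int dstRow  = linear / rowLen;   // which selected row we are writing
    int col     = linear % rowLen;   // element within that row
    long srcRow = indices[dstRow];   // gather source row (0-based)
    dst[dstRow * rowLen + col] = src[srcRow * rowLen + col];
  }
}

int main() {
  const int numRows = 4, rowLen = 3, numIndices = 2;
  float hSrc[numRows * rowLen] = {0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32};
  long  hIdx[numIndices]       = {2, 0};   // select rows 2 and 0
  float hDst[numIndices * rowLen];

  float *dSrc, *dDst; long *dIdx;
  cudaMalloc(&dSrc, sizeof(hSrc));
  cudaMalloc(&dDst, sizeof(hDst));
  cudaMalloc(&dIdx, sizeof(hIdx));
  cudaMemcpy(dSrc, hSrc, sizeof(hSrc), cudaMemcpyHostToDevice);
  cudaMemcpy(dIdx, hIdx, sizeof(hIdx), cudaMemcpyHostToDevice);

  rowSelectKernel<<<1, 128>>>(dDst, dSrc, dIdx, numIndices, rowLen);
  cudaMemcpy(hDst, dDst, sizeof(hDst), cudaMemcpyDeviceToHost);

  for (int r = 0; r < numIndices; ++r)
    printf("row %d: %g %g %g\n", r,
           hDst[r * rowLen], hDst[r * rowLen + 1], hDst[r * rowLen + 2]);

  cudaFree(dSrc); cudaFree(dDst); cudaFree(dIdx);
  return 0;
}

The real kernels additionally handle arbitrary strides through IndexToOffset, collapse dimensions on the host, and assert that every index stays inside the indexed dimension; the addressing arithmetic, however, is the same.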
cd461c95b49f808f320c143a438266659ee32bde.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governin_from_mtxg permissions and * limitations under the License. */ #include <utilities/base_fixture.hpp> #include <utilities/test_utilities.hpp> #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 700 #include <experimental/graph.hpp> #else #include <experimental/louvain.cuh> #endif #include <algorithms.hpp> #include <raft/cudart_utils.h> #include <raft/handle.hpp> #include <rmm/mr/device/cuda_memory_resource.hpp> #include <gtest/gtest.h> #include <algorithm> #include <iterator> #include <limits> #include <numeric> #include <vector> typedef struct Louvain_Usecase_t { std::string graph_file_full_path{}; bool test_weighted{false}; Louvain_Usecase_t(std::string const& graph_file_path, bool test_weighted) : test_weighted(test_weighted) { if ((graph_file_path.length() > 0) && (graph_file_path[0] != '/')) { graph_file_full_path = cugraph::test::get_rapids_dataset_root_dir() + "/" + graph_file_path; } else { graph_file_full_path = graph_file_path; } }; } Louvain_Usecase; class Tests_Louvain : public ::testing::TestWithParam<Louvain_Usecase> { public: Tests_Louvain() {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() {} virtual void TearDown() {} template <typename vertex_t, typename edge_t, typename weight_t, typename result_t> void run_current_test(Louvain_Usecase const& configuration) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 700 CUGRAPH_FAIL("Louvain not supported on Pascal and older architectures"); #else raft::handle_t handle{}; std::cout << "read graph file: " << configuration.graph_file_full_path << std::endl; auto graph = cugraph::test::read_graph_from_matrix_market_file<vertex_t, edge_t, weight_t, false>( handle, configuration.graph_file_full_path, configuration.test_weighted); auto graph_view = graph.view(); louvain(graph_view); #endif } template <typename graph_t> void louvain(graph_t const& graph_view) { using vertex_t = typename graph_t::vertex_type; using weight_t = typename graph_t::weight_type; raft::handle_t handle{}; rmm::device_vector<vertex_t> clustering_v(graph_view.get_number_of_local_vertices()); size_t level; weight_t modularity; std::tie(level, modularity) = cugraph::louvain(handle, graph_view, clustering_v.data().get(), size_t{100}, weight_t{1}); CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement std::cout << "level = " << level << std::endl; std::cout << "modularity = " << modularity << std::endl; } }; // FIXME: add tests for type combinations TEST_P(Tests_Louvain, CheckInt32Int32FloatFloat) { run_current_test<int32_t, int32_t, float, float>(GetParam()); } INSTANTIATE_TEST_CASE_P(simple_test, Tests_Louvain, ::testing::Values(Louvain_Usecase("test/datasets/karate.mtx", true) #if 0 , Louvain_Usecase("test/datasets/web-Google.mtx", true), Louvain_Usecase("test/datasets/ljournal-2008.mtx", true), Louvain_Usecase("test/datasets/webbase-1M.mtx", true) #endif )); CUGRAPH_TEST_PROGRAM_MAIN()
cd461c95b49f808f320c143a438266659ee32bde.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governin_from_mtxg permissions and * limitations under the License. */ #include <utilities/base_fixture.hpp> #include <utilities/test_utilities.hpp> #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 700 #include <experimental/graph.hpp> #else #include <experimental/louvain.cuh> #endif #include <algorithms.hpp> #include <raft/cudart_utils.h> #include <raft/handle.hpp> #include <rmm/mr/device/cuda_memory_resource.hpp> #include <gtest/gtest.h> #include <algorithm> #include <iterator> #include <limits> #include <numeric> #include <vector> typedef struct Louvain_Usecase_t { std::string graph_file_full_path{}; bool test_weighted{false}; Louvain_Usecase_t(std::string const& graph_file_path, bool test_weighted) : test_weighted(test_weighted) { if ((graph_file_path.length() > 0) && (graph_file_path[0] != '/')) { graph_file_full_path = cugraph::test::get_rapids_dataset_root_dir() + "/" + graph_file_path; } else { graph_file_full_path = graph_file_path; } }; } Louvain_Usecase; class Tests_Louvain : public ::testing::TestWithParam<Louvain_Usecase> { public: Tests_Louvain() {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() {} virtual void TearDown() {} template <typename vertex_t, typename edge_t, typename weight_t, typename result_t> void run_current_test(Louvain_Usecase const& configuration) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 700 CUGRAPH_FAIL("Louvain not supported on Pascal and older architectures"); #else raft::handle_t handle{}; std::cout << "read graph file: " << configuration.graph_file_full_path << std::endl; auto graph = cugraph::test::read_graph_from_matrix_market_file<vertex_t, edge_t, weight_t, false>( handle, configuration.graph_file_full_path, configuration.test_weighted); auto graph_view = graph.view(); louvain(graph_view); #endif } template <typename graph_t> void louvain(graph_t const& graph_view) { using vertex_t = typename graph_t::vertex_type; using weight_t = typename graph_t::weight_type; raft::handle_t handle{}; rmm::device_vector<vertex_t> clustering_v(graph_view.get_number_of_local_vertices()); size_t level; weight_t modularity; std::tie(level, modularity) = cugraph::louvain(handle, graph_view, clustering_v.data().get(), size_t{100}, weight_t{1}); CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement std::cout << "level = " << level << std::endl; std::cout << "modularity = " << modularity << std::endl; } }; // FIXME: add tests for type combinations TEST_P(Tests_Louvain, CheckInt32Int32FloatFloat) { run_current_test<int32_t, int32_t, float, float>(GetParam()); } INSTANTIATE_TEST_CASE_P(simple_test, Tests_Louvain, ::testing::Values(Louvain_Usecase("test/datasets/karate.mtx", true) #if 0 , Louvain_Usecase("test/datasets/web-Google.mtx", true), Louvain_Usecase("test/datasets/ljournal-2008.mtx", true), Louvain_Usecase("test/datasets/webbase-1M.mtx", true) #endif )); CUGRAPH_TEST_PROGRAM_MAIN()
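The two listings above are the hipified and original CUDA versions of the same Louvain test. Note that the test only prints the level and final modularity returned by cugraph::louvain; the per-vertex assignments stay on the device in clustering_v. A minimal sketch of pulling them back to the host for inspection -- not part of the test, and assuming the thrust-backed rmm::device_vector used in this release -- could look like:

// Hypothetical follow-up after the cugraph::louvain call inside louvain():
// copy the device-side cluster labels into a host vector.
// Requires: #include <thrust/copy.h>
std::vector<vertex_t> h_clustering(clustering_v.size());
thrust::copy(clustering_v.begin(), clustering_v.end(), h_clustering.begin());
// h_clustering[v] now holds the community id assigned to vertex v.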
b3d90784c71ebd8887835eb2e745faf34d85fc1a.hip
// !!! This is a file automatically generated by hipify!!! //ulimit -s unlimited //nvcc -lcusparse test.cu //nvcc -lcublas -lcusparse -arch sm_20 test.cu && ./a.out #include <iostream> #include <stdio.h> #include <math.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> #include "hip/device_functions.h" #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hip/hip_runtime.h> #include "rocblas.h" #include "hipsparse.h" #include <sstream> #include "ttd.h" #include "kernels.h" #include "helpers.h" #include "SPBLASTK/include/spblas.h" #define TOTALTHREDSPERBLOCK 256 #define COLUMNLENGTH 4 #define NMAXITERKernel 100 #define ROWSCALE 2 using namespace std; __global__ void RCDMKernel(float *A, int*R_Idx, int* n, float*residuals, float*x, float * lambda, float* Li, hiprandState_t* cstate); void timeComparison(int row, int col, int exampleType, float executionTime, float lambda) { double objVal = 0; int dim1 = 14; int dim2 = 1; float PRCDMExecutionTime = executionTime; float SRCDMExecutionTime = executionTime; int m, n; clock_t t1; float * A_TTD; int * Row_IDX; int * Col_IDX; int nnzElements; int* nodeDescription; //-------------------------------GET BOUNDARY POINTS int * boundary; int boundarySize = 0; getBoundaryVector(row, col, exampleType, &boundary, &boundarySize); cout << "Boundary size is " << boundarySize << "\n"; //-------------------------------GENERATE PROBLEMS t1 = clock(); generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX, &Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription, ROWSCALE); print_time_message(&t1, "Getting problem dimension"); cout << "Dimension of your problem is " << m << " x " << n << "\n"; printf("Number of NNZ: %d\n", nnzElements); //-------------------------------GET FORCE VECTORS float* g; float residuals[m]; float b[m]; float* redisuals_dev; getForceVector(&g, m, row, col, exampleType); for (int i = 0; i < m; i++) { g[i] = -g[i]; residuals[i] = g[i]; b[i] = -g[i]; } print_time_message(&t1, "Force vector obtained"); //----------------------------------------- FILE *fp; // fp = fopen("/tmp/asdf", "w"); // // for (int i = 0; i < nnzElements; i++) { // fprintf(fp,"%d,%d,%f\n", Row_IDX[i], Col_IDX[i], A_TTD[i]); // } // fclose(fp); // fp = fopen("/tmp/ttd_vectorB.csv", "w"); // // for (int i = 0; i < m; i++) { // fprintf(fp,"%d,%f\n", i,-g[i]); // } // fclose(fp); // print_time_message(&t1, "Data saved"); //----------------------------------------- float x[n]; float L[n]; float Li[n]; for (int i = 0; i < n; i++) { x[i] = 0; L[i] = 0; for (int k = 4 * i; k < 4 * i + 4; k++) { L[i] += A_TTD[k] * A_TTD[k]; } Li[i] = 1 / L[i]; } //-------------------------------Preparing A' in colum orientated int ATcount[m]; int actualCount[m]; int rowCounts[m]; for (int i = 0; i < m; i++) { ATcount[i] = 0; actualCount[i] = 0; } for (int i = 0; i < nnzElements; i++) { ATcount[Row_IDX[i] - 1]++; // Shift from 1-based to 0 based } rowCounts[0] = ATcount[0]; for (int i = 1; i < m; i++) { int tmpCount = ATcount[i]; rowCounts[i] = ATcount[i]; ATcount[i] += ATcount[i - 1]; actualCount[i] = ATcount[i] - tmpCount; } for (int i = 0; i < m; i++) { ATcount[i] = actualCount[i]; } float ATCB[nnzElements]; int ColAT[nnzElements]; for (int i = 0; i < n; i++) { for (int j = 4 * i; j < 4 * i + 4; j++) { int tmprow = Row_IDX[j] - 1; ColAT[actualCount[tmprow]] = i; ATCB[actualCount[tmprow]] = A_TTD[j]; actualCount[tmprow]++; } } fp = fopen("/tmp/ttd_log.txt", "w"); //-------------------------------Inicializacia CuSpare Library /* allocate 
GPU memory and copy the matrix and vectors into it */ hipsparseStatus_t status; hipsparseHandle_t handle = 0; /* initialize cusparse library */ status = hipsparseCreate(&handle); if (status != HIPSPARSE_STATUS_SUCCESS) { printf("CUSPARSE Library initialization failed"); } //-------------------------------Alokacia device memroies float* A_d; int* C_idx_d; hipMalloc((void**) &A_d, nnzElements * sizeof(float)); checkCUDAError("kernel execution Alloc A_d"); hipMalloc((void**) &C_idx_d, nnzElements * sizeof(int)); checkCUDAError("kernel execution Alloc R_IDX"); int * n_d; hipMalloc((void**) &n_d, 1 * sizeof(int)); checkCUDAError("kernel execution Alloc ..."); float* derivatives_d; hipMalloc((void**) &derivatives_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc g_d"); float* L_d; hipMalloc((void**) &L_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc Li_d"); int* ATcount_d; hipMalloc((void**) &ATcount_d, m * sizeof(int)); checkCUDAError("kernel execution Alloc AtCount_d"); float* x_d; hipMalloc((void**) &x_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc x_d"); float* lambda_d; hipMalloc((void**) &lambda_d, 1 * sizeof(float)); checkCUDAError("kernel execution Alloc lambda_d"); // float* Li_d; // hipMalloc((void**) &Li_d, n * sizeof(float)); // checkCUDAError("kernel execution Alloc Li_d"); print_time_message(&t1, "Device memory allocated"); //-------------------------------Copy data hipMemcpy(A_d, &ATCB[0], nnzElements * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy A_d"); hipMemcpy(C_idx_d, &ColAT[0], nnzElements * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy R"); hipMemcpy(derivatives_d, x, n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); hipMemcpy(x_d, x, n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); hipMemcpy(n_d, &n, 1 * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); hipMemcpy(L_d, L, n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); for (int i = m - 1; i > 0; i--) { actualCount[i] -= actualCount[i - 1]; } hipMemcpy(ATcount_d, actualCount, m * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); print_time_message(&t1, "Data coppied"); //-------------------------------Prepare of Derivatives vector on device for (int i = 0; i < m; i++) { if (g[i] != 0) { hipsparseStatus_t copystatus = cusparseSaxpyi(handle, rowCounts[i], g[i], &A_d[ATcount[i]], &C_idx_d[ATcount[i]], derivatives_d, HIPSPARSE_INDEX_BASE_ZERO); if (copystatus != HIPSPARSE_STATUS_SUCCESS) { printf("Nastala chyba!"); } } } print_time_message(&t1, "Derivatives vector on device"); //--------------------------------------------------------------------- //------------------------------Serial Benchmark----------------------- float derivatives[n]; hipMemcpy(&derivatives[0], derivatives_d, n * sizeof(float), hipMemcpyDeviceToHost); print_time_message(&t1, "Initial Shrink Start"); int max_IDX = 0; float max_VAL = 0; float optimalStep = 0; float TVALUES[n]; float EVALUES[n]; for (int j = 0; j < n; j++) { float LLocal = L[j]; float Li = 1 / LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; float mlt2 = 1; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else { t = -xLocal; mlt2 = 
0; } float tmpEnergy = t * t * LLocal; TVALUES[j] = t; // EVALUES[j] = tmpEnergy / 2;//+ lambda*x[j]*( signbit(x[j]) -signbit(x[j]+TVALUES[j]) ); EVALUES[j] = mlt2 * (tmpEnergy / 2 + lambda * xLocal * (signbit(xLocal) - signbit(xLocal + t))) + (1 - mlt2) * (lambda * xLocal * signbit(xLocal) - derivatives[j] * t - LLocal * t * t / 2); if (tmpEnergy > max_VAL) { optimalStep = t; max_VAL = tmpEnergy; max_IDX = j; } } print_time_message(&t1, "Initial Shrink End"); print_time_message(&t1, "Start serial Code"); float setialExecutionTime = 0; long long setialItetaions = 0; clock_t t_start; t_start = clock(); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < executionTime) { // printf("optimal t_idx=%d, tval=%f, elapsedTime:%f\n", max_IDX, TVALUES[max_IDX],getTotalElapsetTime(&t_start)); // Update x[max_IDX] += TVALUES[max_IDX]; optimalStep = TVALUES[max_IDX]; for (int ki = max_IDX * 4; ki < max_IDX * 4 + 4; ki++) { for (int jj = ATcount[Row_IDX[ki] - 1]; jj < ATcount[Row_IDX[ki] - 1] + rowCounts[Row_IDX[ki] - 1]; jj++) { int j = ColAT[jj]; derivatives[j] += optimalStep * A_TTD[ki] * ATCB[jj]; float LLocal = L[j]; float Li = 1 / LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; float mlt2 = 1; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else { t = -xLocal; mlt2 = 0; } float tmpEnergy = t * t * LLocal; TVALUES[j] = t; // EVALUES[j] = tmpEnergy / 2;//+ lambda*x[j]*( signbit(x[j]) -signbit(x[j]+TVALUES[j]) ); EVALUES[j] = mlt2 * (tmpEnergy / 2 + lambda * xLocal * (signbit(xLocal) - signbit(xLocal + t))) + (1 - mlt2) * (lambda * xLocal * signbit(xLocal) - derivatives[j] * t - LLocal * t * t / 2); } } max_VAL = 0; max_IDX = 0; for (int j = 0; j < n; j++) { if (EVALUES[j] > max_VAL) { max_VAL = EVALUES[j]; max_IDX = j; } } setialItetaions++; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&setialExecutionTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); float setialIterationPerSec = setialItetaions / setialExecutionTime * 1000; printf("Serial execution time:%f ms = %f it/sec \n", setialExecutionTime, setialIterationPerSec); fprintf(fp, "Serial execution time:%f ms = %f it/sec \n", setialExecutionTime, setialIterationPerSec); printf("serial greedy iterations:%d\n", setialItetaions); fprintf(fp, "serial greedy iterations:%d\n", setialItetaions); saveSolutionIntoFile("/tmp/ttd_g_serial.txt", x, n, nodeDescription, 0); objVal = computeTTDObjectiveValue(A_TTD, Row_IDX, Col_IDX, x, n, m, b, nnzElements, lambda); printf("Obj Val: %1.16f\n", objVal); fprintf(fp, "Obj Val: %1.16f\n", objVal); //-------------------------------Computation float* T_dev; float* E_dev; hipblasAlloc(n, sizeof(float), (void**) &T_dev); checkCUDAError("Alokacia T_dev"); hipblasAlloc(n, sizeof(float), (void**) &E_dev); checkCUDAError("Alokacia E_dev"); float time; hipsparseStatus_t copystatus; hipMemcpy(lambda_d, &lambda, 1 * sizeof(float), hipMemcpyHostToDevice); dim3 dimBlock( TOTALTHREDSPERBLOCK); dim3 dimGridRCDM( 1, 1+n/(TOTALTHREDSPERBLOCK)); float tPoint[1]; int maxIndex = 10; hipLaunchKernelGGL(( ShrinkKernel), dim3(dimGridRCDM) ,dim3(dimBlock) , 0, 0, T_dev, E_dev, derivatives_d, L_d,x_d, lambda, n); int maxShrinkSubset = 0; for (int i = 0; i < m; i++) { if (rowCounts[i] > maxShrinkSubset) maxShrinkSubset = rowCounts[i]; } 
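/*
 * The parallel greedy phase below mirrors the serial loop above on the GPU:
 * T_dev holds the optimal coordinate step and E_dev the greedy score
 * (estimated energy decrease) for every column. Each inner iteration picks
 * the best coordinate with hipblasIsamax over E_dev, applies the step with
 * IncreaseElementAndShrink, and then, for each of the 4 nonzeros of that
 * column, scatters the change into derivatives_d with cusparseSaxpyi and
 * re-shrinks only the coordinates sharing that row via ShrinkKernelSubset,
 * instead of re-shrinking all n coordinates.
 */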
printf("Max shrink subset %d\n", maxShrinkSubset); dim3 dimGridShrinkSubset( 1, 1+maxShrinkSubset/(TOTALTHREDSPERBLOCK)); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); long long parallel_iterations = 0; t_start = clock(); while (getTotalElapsetTime(&t_start) < executionTime) { for (int asdf = 0; asdf < 100; asdf++) { maxIndex = hipblasIsamax(n, E_dev, 1); maxIndex = maxIndex - 1; // printf("%d;Selected index:%d\n", i, maxIndex); hipLaunchKernelGGL(( IncreaseElementAndShrink), dim3(1) ,dim3(1) , 0, 0, maxIndex,T_dev, E_dev, derivatives_d, L_d,x_d, lambda, n ); checkCUDAError("IncreaseElement"); hipblasGetVector(1, sizeof(float), &T_dev[maxIndex], 1, tPoint, 1); checkCUDAError("get T"); // printf("optimal t_idx=%d and value = %f\n", maxIndex, tPoint[0]); for (int ki = maxIndex * 4; ki < maxIndex * 4 + 4; ki++) { // printf("pridat %f %f %d\n", tPoint[0], A_TTD[ki],Row_IDX[ki]-1); copystatus = cusparseSaxpyi(handle, rowCounts[Row_IDX[ki] - 1], tPoint[0] * A_TTD[ki], &A_d[ATcount[Row_IDX[ki] - 1]], &C_idx_d[ATcount[Row_IDX[ki] - 1]], derivatives_d, HIPSPARSE_INDEX_BASE_ZERO); if (copystatus != HIPSPARSE_STATUS_SUCCESS) printf("Nastala chyba pri CuSparse!\n"); // hipDeviceSynchronize(); hipLaunchKernelGGL(( ShrinkKernelSubset), dim3(dimGridShrinkSubset) ,dim3(dimBlock) , 0, 0, T_dev, E_dev, derivatives_d, L_d, x_d, lambda, &ATcount_d[Row_IDX[ki] - 1], &C_idx_d[ATcount[Row_IDX[ki] - 1]]); // checkCUDAError("Shrink subset"); } parallel_iterations++; } } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("pridane %f \n", tPoint[0]); printf("trvanie %f ms , %f iter/sec speedUp:%f \n", time, parallel_iterations / time * 1000, parallel_iterations / time * 1000 / setialIterationPerSec); printf("parallel greedy iterations:%d\n", parallel_iterations); fprintf(fp, "trvanie %f ms , %f iter/sec speedUp:%f \n", time, parallel_iterations / time * 1000, parallel_iterations / time * 1000 / setialIterationPerSec); fprintf(fp, "parallel greedy iterations:%d\n", parallel_iterations); float treshHold = 0; hipMemcpy(x, x_d, n * sizeof(float), hipMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); saveSolutionIntoFile("/tmp/ttd_g_parallel.txt", x, n, nodeDescription, treshHold); objVal = computeTTDObjectiveValue(A_TTD, Row_IDX, Col_IDX, x, n, m, b, nnzElements, lambda); printf("Obj Val: %1.16f\n", objVal); fprintf(fp, "Obj Val: %1.16f\n", objVal); print_time_message(&t1, "Computation"); //-------------------------------DeAllocation hipFree(ATcount_d); print_time_message(&t1, "Device memory DeAllocated"); for (int i = 0; i < n; i++) x[i] = 0; hipMalloc((void**) &redisuals_dev, m * sizeof(float)); checkCUDAError("kernel execution Alloc residuals_d"); hipMemcpy(redisuals_dev, &residuals[0], m * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy residuals_d"); hipMemcpy(x_d, &x[0], n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy x"); setialItetaions = 0; t_start = clock(); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < SRCDMExecutionTime) { float tmp = (float) rand() / RAND_MAX; int idx = (int) (n * tmp); tmp = 0; for (int j = idx * 4; j < idx * 4 + 4; j++) { //printf("A=%f, res=%f,colid=%d\n",A_TTD[j],residuals[Col_IDX[j]],Col_IDX[j]); tmp += A_TTD[j] * residuals[Row_IDX[j] - 1]; } float tmp1 = Li[idx] * (tmp + lambda); if (x[idx] > tmp1) { tmp = -tmp1; } else { 
tmp1 = Li[idx] * (tmp - lambda); if (x[idx] < tmp1) { tmp = -tmp1; } else { tmp = -x[idx]; } } x[idx] += tmp; // printf("ID:%d, value%f\n",idx, x[idx]); for (int j = idx * 4; j < idx * 4 + 4; j++) { residuals[Row_IDX[j] - 1] += tmp * A_TTD[j]; } setialItetaions++; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&setialExecutionTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); float serialItPsec = (float) setialItetaions / setialExecutionTime; printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); printf("serial random iterations:%d\n", setialItetaions); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); fprintf(fp, "serial random iterations:%d\n", setialItetaions); saveSolutionIntoFile("/tmp/ttd_r_serial.txt", x, n, nodeDescription, 0); objVal = computeTTDObjectiveValue(A_TTD, Row_IDX, Col_IDX, x, n, m, b, nnzElements, lambda); printf("Obj Val: %1.16f\n", objVal); fprintf(fp, "Obj Val: %1.16f\n", objVal); //-------------------------------------------------- // ============ Parallel Random dim3 dimBlockRCD2( TOTALTHREDSPERBLOCK); dim3 dimGridRCDM2( dim1, dim2); hiprandState_t *devStates; hipMalloc((void **) &devStates, dim1 * dim2 * TOTALTHREDSPERBLOCK * sizeof(hiprandState_t)); hipLaunchKernelGGL(( setup_kernel), dim3(dimGridRCDM2), dim3(dimBlockRCD2) , 0, 0, devStates); checkCUDAError("Inicializacia ranom states"); hipFree(L_d); checkCUDAError("Dealocation"); float* Li_dev; hipMalloc((void**) &Li_dev, n * sizeof(float)); checkCUDAError("kernel execution Alloc Li_d"); hipMemcpy(Li_dev, &Li[0], n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Coppy Li_d"); hipMemcpy(A_d, A_TTD, nnzElements * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy A_d"); hipFree(C_idx_d); checkCUDAError("Dealocation"); int* R_idx_d; hipMalloc((void**) &R_idx_d, nnzElements * sizeof(int)); checkCUDAError("kernel execution Alloc R_idx_d"); hipMemcpy(R_idx_d, Row_IDX, nnzElements * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy Ridx_d"); print_time_message(&t1, "Idem spustat paralelny RCDM"); parallel_iterations = 0; float parallelExecutionTime; t_start = clock(); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); float rcdmBufferedTime = 0; while (rcdmBufferedTime < PRCDMExecutionTime) { for (int koko = 0; koko < 4000; koko++) { hipLaunchKernelGGL(( RCDMKernel), dim3(dimGridRCDM2), dim3(dimBlockRCD2) , 0, 0, A_d, R_idx_d, n_d, redisuals_dev, x_d, lambda_d, Li_dev, devStates); parallel_iterations += dim1 * dim2; } rcdmBufferedTime += getTotalElapsetTime(&t_start); float norm_g = hipblasSnrm2(m, redisuals_dev, 1); float norm_x = hipblasSasum(n, x_d, 1); double rcdmObjectiveValue = norm_x * lambda + 0.5 * norm_g * norm_g; // printf("Current value = %1.16f\n", rcdmObjectiveValue); hipMemcpy(x, x_d, n * sizeof(float), hipMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); double DoubleObjectiveValue = computeTTDObjectiveValue(A_TTD, Row_IDX, Col_IDX, x, n, m, b, nnzElements, lambda); printf("Double objective value: %1.16f\n", DoubleObjectiveValue); printf("Error: %1.16f\n", abs(DoubleObjectiveValue - rcdmObjectiveValue)); updateG(A_TTD, Row_IDX, Col_IDX, x, n, m, b, nnzElements, residuals); hipMemcpy(redisuals_dev, &residuals[0], m * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy residuals_d"); printf("RESTART\n"); 
t_start = clock(); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&parallelExecutionTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("parallel random iterations:%d toital %d\n", parallel_iterations, parallel_iterations * NMAXITERKernel * TOTALTHREDSPERBLOCK); printf("trvanie %f ms , %f iter/sec , speedup:%f \n", parallelExecutionTime, parallel_iterations / parallelExecutionTime * 1000 * NMAXITERKernel * TOTALTHREDSPERBLOCK, TOTALTHREDSPERBLOCK * NMAXITERKernel * parallel_iterations / parallelExecutionTime / serialItPsec); fprintf(fp, "parallel random iterations:%d toital %d\n", parallel_iterations, parallel_iterations * NMAXITERKernel * TOTALTHREDSPERBLOCK); fprintf(fp, "trvanie %f ms , %f iter/sec , speedup:%f \n", parallelExecutionTime, parallel_iterations / parallelExecutionTime * 1000 * NMAXITERKernel * TOTALTHREDSPERBLOCK, TOTALTHREDSPERBLOCK * NMAXITERKernel * parallel_iterations / parallelExecutionTime / serialItPsec); print_time_message(&t1, "END spustat paralelny RCDM"); hipMemcpy(x, x_d, n * sizeof(float), hipMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); saveSolutionIntoFile("/tmp/ttd_r_parallel.txt", x, n, nodeDescription, treshHold); objVal = computeTTDObjectiveValue(A_TTD, Row_IDX, Col_IDX, x, n, m, b, nnzElements, lambda); printf("Obj Val: %1.16f\n", objVal); fprintf(fp, "Obj Val: %1.16f\n", objVal); //------------------------------------------------------ //===================== Nesterov Composite Accelerated float A_nest = 0; float L_nest = L[0]; float mu = 0; float v[n]; float y[n]; float derv[n]; float derv2[n]; float dervHistory[n]; for (int i = 0; i < n; i++) { x[i] = 0; v[i] = 0; derv[i] = 0; dervHistory[i] = 0; } float gamma_u = 2; float gamma_d = gamma_u; float quadSolTmp = 0; parallel_iterations = 0; t_start = clock(); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < executionTime) { int doRepeat = 1; while (doRepeat) { quadSolTmp = 2 * (1 + A_nest * mu) / L_nest; float a = (quadSolTmp + sqrt(quadSolTmp * quadSolTmp + 4 * A_nest * quadSolTmp)) / 2; float convCombA = A_nest / (a + A_nest); float convComba = a / (a + A_nest); for (int i = 0; i < n; i++) { y[i] = convCombA * x[i] + convComba * v[i]; } for (int i = 0; i < n; i++) { derv[i] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for (int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * y[Col_IDX[i] - 1]; } for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } float LLocal = L_nest; float Li = 1 / LLocal; float lambdaOverL = lambda * Li; for (int j = 0; j < n; j++) { float xLocal = x[j]; float alpha = derv[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else t = -xLocal; TVALUES[j] = t; } for (int j = 0; j < n; j++) { derv2[j] = -L_nest * TVALUES[j] - derv[j]; derv[j] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for (int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * (y[Col_IDX[i] - 1] + TVALUES[Col_IDX[i] - 1]); } for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } for (int i = 0; i < n; i++) { derv[i] += derv2[i]; } float LS = 0; float RS = 0; for (int i = 0; i < n; i++) { LS -= derv[i] * TVALUES[i]; RS += derv[i] * derv[i]; } RS = RS / L_nest; if (LS < RS) { L_nest = L_nest * 
gamma_u; doRepeat = 1; } else { doRepeat = 0; A_nest += a; for (int i = 0; i < n; i++) { x[i] = y[i] + TVALUES[i]; } L_nest = L_nest / gamma_d; float ratioA = (A_nest - a) / A_nest; float ratioB = (a) / A_nest; for (int i = 0; i < n; i++) { dervHistory[i] = ratioA * dervHistory[i] + ratioB * (derv[i] - derv2[i]); } float LLocal = 1 / A_nest; float Li = A_nest; float lambdaOverL = lambda * Li; for (int i = 0; i < n; i++) { float alpha = dervHistory[i] * Li; float deltaL = -alpha - lambdaOverL; float deltaR = -alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL; else if (deltaR < 0) t = deltaR; else t = 0; v[i] = t; } } } parallel_iterations++; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&setialExecutionTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("serial AC iterations:%d\n", parallel_iterations); printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, parallel_iterations / setialExecutionTime * 1000); fprintf(fp, "serial AC iterations:%d\n", parallel_iterations); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, parallel_iterations / setialExecutionTime * 1000); saveSolutionIntoFile("/tmp/ttd_ac.txt", x, n, nodeDescription, treshHold); objVal = computeTTDObjectiveValue(A_TTD, Row_IDX, Col_IDX, x, n, m, b, nnzElements, lambda); printf("Obj Val: %1.16f\n", objVal); fprintf(fp, "Obj Val: %1.16f\n", objVal); //------------------------------------------------------- hipFree(Li_dev); checkCUDAError("Dealocationa"); hipFree(R_idx_d); checkCUDAError("Dealocationb"); hipFree(redisuals_dev); checkCUDAError("Dealocationc"); hipFree(lambda_d); checkCUDAError("Dealocationd"); hipFree(A_d); checkCUDAError("Dealocatione"); hipFree(n_d); checkCUDAError("Dealocationg"); hipFree(x_d); checkCUDAError("Dealocationh"); hipFree(derivatives_d); checkCUDAError("Dealocationi"); print_time_message(&t1, "Device memory DeAllocated"); fclose(fp); } void oneDayPR(int row, int col, int exampleType, float executionTime, float lambda) { int dim1 = 14; int dim2 = 1; float PRCDMExecutionTime = 3600; float SRCDMExecutionTime = 0; int m, n; clock_t t1; float * A_TTD; int * Row_IDX; int * Col_IDX; int nnzElements; int* nodeDescription; //-------------------------------GET BOUNDARY POINTS int * boundary; int boundarySize = 0; getBoundaryVector(row, col, exampleType, &boundary, &boundarySize); cout << "Boundary size is " << boundarySize << "\n"; //-------------------------------GENERATE PROBLEMS t1 = clock(); generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX, &Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription, ROWSCALE); print_time_message(&t1, "Getting problem dimension"); cout << "Dimension of your problem is " << m << " x " << n << "\n"; printf("Number of NNZ: %d\n", nnzElements); //-------------------------------GET FORCE VECTORS float* g; getForceVector(&g, m, row, col, exampleType); for (int i = 0; i < m; i++) { g[i] = -g[i]; } print_time_message(&t1, "Force vector obtained"); //----------------------------------------- FILE *fp; // fp = fopen("/tmp/asdf", "w"); // // for (int i = 0; i < nnzElements; i++) { // fprintf(fp,"%d,%d,%f\n", Row_IDX[i], Col_IDX[i], A_TTD[i]); // } // fclose(fp); // fp = fopen("/tmp/ttd_vectorB.csv", "w"); // // for (int i = 0; i < m; i++) { // fprintf(fp,"%d,%f\n", i,-g[i]); // } // fclose(fp); // print_time_message(&t1, "Data saved"); //----------------------------------------- float x[n]; float L[n]; float Li[n]; for (int i = 0; i < n; i++) { x[i] = 0; L[i] = 0; for (int k = 4 * 
i; k < 4 * i + 4; k++) { L[i] += A_TTD[k] * A_TTD[k]; } Li[i] = 1 / L[i]; } //-------------------------------Preparing A' in colum orientated int ATcount[m]; int actualCount[m]; int rowCounts[m]; for (int i = 0; i < m; i++) { ATcount[i] = 0; actualCount[i] = 0; } for (int i = 0; i < nnzElements; i++) { ATcount[Row_IDX[i] - 1]++; // Shift from 1-based to 0 based } rowCounts[0] = ATcount[0]; for (int i = 1; i < m; i++) { int tmpCount = ATcount[i]; rowCounts[i] = ATcount[i]; ATcount[i] += ATcount[i - 1]; actualCount[i] = ATcount[i] - tmpCount; } for (int i = 0; i < m; i++) { ATcount[i] = actualCount[i]; } float ATCB[nnzElements]; int ColAT[nnzElements]; for (int i = 0; i < n; i++) { for (int j = 4 * i; j < 4 * i + 4; j++) { int tmprow = Row_IDX[j] - 1; ColAT[actualCount[tmprow]] = i; ATCB[actualCount[tmprow]] = A_TTD[j]; actualCount[tmprow]++; } } fp = fopen("/tmp/ttd_log.txt", "w"); //-------------------------------Inicializacia CuSpare Library /* allocate GPU memory and copy the matrix and vectors into it */ hipsparseStatus_t status; hipsparseHandle_t handle = 0; /* initialize cusparse library */ status = hipsparseCreate(&handle); if (status != HIPSPARSE_STATUS_SUCCESS) { printf("CUSPARSE Library initialization failed"); } //-------------------------------Alokacia device memroies float* A_d; int* C_idx_d; hipMalloc((void**) &A_d, nnzElements * sizeof(float)); checkCUDAError("kernel execution Alloc A_d"); hipMalloc((void**) &C_idx_d, nnzElements * sizeof(int)); checkCUDAError("kernel execution Alloc R_IDX"); int * n_d; hipMalloc((void**) &n_d, 1 * sizeof(int)); checkCUDAError("kernel execution Alloc ..."); float* derivatives_d; hipMalloc((void**) &derivatives_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc g_d"); float* L_d; hipMalloc((void**) &L_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc Li_d"); int* ATcount_d; hipMalloc((void**) &ATcount_d, m * sizeof(int)); checkCUDAError("kernel execution Alloc AtCount_d"); float* x_d; hipMalloc((void**) &x_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc x_d"); float* lambda_d; hipMalloc((void**) &lambda_d, 1 * sizeof(float)); checkCUDAError("kernel execution Alloc lambda_d"); // float* Li_d; // hipMalloc((void**) &Li_d, n * sizeof(float)); // checkCUDAError("kernel execution Alloc Li_d"); print_time_message(&t1, "Device memory allocated"); //-------------------------------Copy data hipMemcpy(A_d, &ATCB[0], nnzElements * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy A_d"); hipMemcpy(C_idx_d, &ColAT[0], nnzElements * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy R"); hipMemcpy(derivatives_d, x, n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); hipMemcpy(x_d, x, n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); hipMemcpy(n_d, &n, 1 * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); hipMemcpy(L_d, L, n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); for (int i = m - 1; i > 0; i--) { actualCount[i] -= actualCount[i - 1]; } hipMemcpy(ATcount_d, actualCount, m * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); print_time_message(&t1, "Data coppied"); //-------------------------------Prepare of Derivatives vector on device for (int i = 0; i < m; i++) { if (g[i] != 0) { hipsparseStatus_t copystatus = cusparseSaxpyi(handle, rowCounts[i], g[i], &A_d[ATcount[i]], 
&C_idx_d[ATcount[i]], derivatives_d, HIPSPARSE_INDEX_BASE_ZERO); if (copystatus != HIPSPARSE_STATUS_SUCCESS) { printf("Nastala chyba!"); } } } print_time_message(&t1, "Derivatives vector on device"); //--------------------------------------------------------------------- //------------------------------Serial Benchmark----------------------- float derivatives[n]; hipMemcpy(&derivatives[0], derivatives_d, n * sizeof(float), hipMemcpyDeviceToHost); print_time_message(&t1, "Initial Shrink Start"); int max_IDX = 0; float max_VAL = 0; float optimalStep = 0; float TVALUES[n]; float EVALUES[n]; for (int j = 0; j < n; j++) { float LLocal = L[j]; float Li = 1 / LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; float mlt2 = 1; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else { t = -xLocal; mlt2 = 0; } float tmpEnergy = t * t * LLocal; TVALUES[j] = t; // EVALUES[j] = tmpEnergy / 2;//+ lambda*x[j]*( signbit(x[j]) -signbit(x[j]+TVALUES[j]) ); EVALUES[j] = mlt2 * (tmpEnergy / 2 + lambda * xLocal * (signbit(xLocal) - signbit(xLocal + t))) + (1 - mlt2) * (lambda * xLocal * signbit(xLocal) - derivatives[j] * t - LLocal * t * t / 2); if (tmpEnergy > max_VAL) { optimalStep = t; max_VAL = tmpEnergy; max_IDX = j; } } print_time_message(&t1, "Initial Shrink End"); print_time_message(&t1, "Start serial Code"); float setialExecutionTime = 0; long long setialItetaions = 0; clock_t t_start; t_start = clock(); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < executionTime) { // printf("optimal t_idx=%d, tval=%f, elapsedTime:%f\n", max_IDX, TVALUES[max_IDX],getTotalElapsetTime(&t_start)); // Update x[max_IDX] += TVALUES[max_IDX]; optimalStep = TVALUES[max_IDX]; for (int ki = max_IDX * 4; ki < max_IDX * 4 + 4; ki++) { for (int jj = ATcount[Row_IDX[ki] - 1]; jj < ATcount[Row_IDX[ki] - 1] + rowCounts[Row_IDX[ki] - 1]; jj++) { int j = ColAT[jj]; derivatives[j] += optimalStep * A_TTD[ki] * ATCB[jj]; float LLocal = L[j]; float Li = 1 / LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; float mlt2 = 1; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else { t = -xLocal; mlt2 = 0; } float tmpEnergy = t * t * LLocal; TVALUES[j] = t; // EVALUES[j] = tmpEnergy / 2;//+ lambda*x[j]*( signbit(x[j]) -signbit(x[j]+TVALUES[j]) ); EVALUES[j] = mlt2 * (tmpEnergy / 2 + lambda * xLocal * (signbit(xLocal) - signbit(xLocal + t))) + (1 - mlt2) * (lambda * xLocal * signbit(xLocal) - derivatives[j] * t - LLocal * t * t / 2); } } max_VAL = 0; max_IDX = 0; for (int j = 0; j < n; j++) { if (EVALUES[j] > max_VAL) { max_VAL = EVALUES[j]; max_IDX = j; } } setialItetaions++; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&setialExecutionTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); float setialIterationPerSec = setialItetaions / setialExecutionTime * 1000; printf("Serial execution time:%f ms = %f it/sec \n", setialExecutionTime, setialIterationPerSec); fprintf(fp, "Serial execution time:%f ms = %f it/sec \n", setialExecutionTime, setialIterationPerSec); printf("serial greedy iterations:%d\n", setialItetaions); fprintf(fp, "serial greedy iterations:%d\n", 
setialItetaions); saveSolutionIntoFile("/tmp/ttd_g_serial.txt", x, n, nodeDescription, 0); //-------------------------------Computation float* T_dev; float* E_dev; hipblasAlloc(n, sizeof(float), (void**) &T_dev); checkCUDAError("Alokacia T_dev"); hipblasAlloc(n, sizeof(float), (void**) &E_dev); checkCUDAError("Alokacia E_dev"); float time; hipsparseStatus_t copystatus; hipMemcpy(lambda_d, &lambda, 1 * sizeof(float), hipMemcpyHostToDevice); dim3 dimBlock( TOTALTHREDSPERBLOCK); dim3 dimGridRCDM( 1, 1+n/(TOTALTHREDSPERBLOCK)); float tPoint[1]; int maxIndex = 10; hipLaunchKernelGGL(( ShrinkKernel), dim3(dimGridRCDM) ,dim3(dimBlock) , 0, 0, T_dev, E_dev, derivatives_d, L_d,x_d, lambda, n); int maxShrinkSubset = 0; for (int i = 0; i < m; i++) { if (rowCounts[i] > maxShrinkSubset) maxShrinkSubset = rowCounts[i]; } printf("Max shrink subset %d\n", maxShrinkSubset); dim3 dimGridShrinkSubset( 1, 1+maxShrinkSubset/(TOTALTHREDSPERBLOCK)); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); long long parallel_iterations = 0; t_start = clock(); while (getTotalElapsetTime(&t_start) < executionTime) { for (int asdf = 0; asdf < 100; asdf++) { maxIndex = hipblasIsamax(n, E_dev, 1); maxIndex = maxIndex - 1; // printf("%d;Selected index:%d\n", i, maxIndex); hipLaunchKernelGGL(( IncreaseElementAndShrink), dim3(1) ,dim3(1) , 0, 0, maxIndex,T_dev, E_dev, derivatives_d, L_d,x_d, lambda, n ); checkCUDAError("IncreaseElement"); hipblasGetVector(1, sizeof(float), &T_dev[maxIndex], 1, tPoint, 1); checkCUDAError("get T"); // printf("optimal t_idx=%d and value = %f\n", maxIndex, tPoint[0]); for (int ki = maxIndex * 4; ki < maxIndex * 4 + 4; ki++) { // printf("pridat %f %f %d\n", tPoint[0], A_TTD[ki],Row_IDX[ki]-1); copystatus = cusparseSaxpyi(handle, rowCounts[Row_IDX[ki] - 1], tPoint[0] * A_TTD[ki], &A_d[ATcount[Row_IDX[ki] - 1]], &C_idx_d[ATcount[Row_IDX[ki] - 1]], derivatives_d, HIPSPARSE_INDEX_BASE_ZERO); if (copystatus != HIPSPARSE_STATUS_SUCCESS) printf("Nastala chyba pri CuSparse!\n"); // hipDeviceSynchronize(); hipLaunchKernelGGL(( ShrinkKernelSubset), dim3(dimGridShrinkSubset) ,dim3(dimBlock) , 0, 0, T_dev, E_dev, derivatives_d, L_d, x_d, lambda, &ATcount_d[Row_IDX[ki] - 1], &C_idx_d[ATcount[Row_IDX[ki] - 1]]); // checkCUDAError("Shrink subset"); } parallel_iterations++; } } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("pridane %f \n", tPoint[0]); printf("trvanie %f ms , %f iter/sec speedUp:%f \n", time, parallel_iterations / time * 1000, parallel_iterations / time * 1000 / setialIterationPerSec); printf("parallel greedy iterations:%d\n", parallel_iterations); fprintf(fp, "trvanie %f ms , %f iter/sec speedUp:%f \n", time, parallel_iterations / time * 1000, parallel_iterations / time * 1000 / setialIterationPerSec); fprintf(fp, "parallel greedy iterations:%d\n", parallel_iterations); float treshHold = 0; hipMemcpy(x, x_d, n * sizeof(float), hipMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); saveSolutionIntoFile("/tmp/ttd_g_parallel.txt", x, n, nodeDescription, treshHold); print_time_message(&t1, "Computation"); //-------------------------------DeAllocation hipFree(ATcount_d); print_time_message(&t1, "Device memory DeAllocated"); float residuals[m]; float b[m]; float* redisuals_dev; for (int i = 0; i < m; i++) { residuals[i] = g[i]; b[i] = -g[i]; } for (int i = 0; i < n; i++) x[i] = 0; hipMalloc((void**) &redisuals_dev, m * sizeof(float)); checkCUDAError("kernel execution 
Alloc residuals_d"); hipMemcpy(redisuals_dev, &residuals[0], m * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy residuals_d"); hipMemcpy(x_d, &x[0], n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy x"); setialItetaions = 0; t_start = clock(); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < SRCDMExecutionTime) { float tmp = (float) rand() / RAND_MAX; int idx = (int) (n * tmp); tmp = 0; for (int j = idx * 4; j < idx * 4 + 4; j++) { //printf("A=%f, res=%f,colid=%d\n",A_TTD[j],residuals[Col_IDX[j]],Col_IDX[j]); tmp += A_TTD[j] * residuals[Row_IDX[j] - 1]; } float tmp1 = Li[idx] * (tmp + lambda); if (x[idx] > tmp1) { tmp = -tmp1; } else { tmp1 = Li[idx] * (tmp - lambda); if (x[idx] < tmp1) { tmp = -tmp1; } else { tmp = -x[idx]; } } x[idx] += tmp; // printf("ID:%d, value%f\n",idx, x[idx]); for (int j = idx * 4; j < idx * 4 + 4; j++) { residuals[Row_IDX[j] - 1] += tmp * A_TTD[j]; } setialItetaions++; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&setialExecutionTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); float serialItPsec = (float) setialItetaions / setialExecutionTime; printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); printf("serial random iterations:%d\n", setialItetaions); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); fprintf(fp, "serial random iterations:%d\n", setialItetaions); saveSolutionIntoFile("/tmp/ttd_r_serial.txt", x, n, nodeDescription, 0); //-------------------------------------------------- // ============ Parallel Random dim3 dimBlockRCD2( TOTALTHREDSPERBLOCK); dim3 dimGridRCDM2( dim1, dim2); hiprandState_t *devStates; hipMalloc((void **) &devStates, dim1 * dim2 * TOTALTHREDSPERBLOCK * sizeof(hiprandState_t)); hipLaunchKernelGGL(( setup_kernel), dim3(dimGridRCDM2), dim3(dimBlockRCD2) , 0, 0, devStates); checkCUDAError("Inicializacia ranom states"); hipFree(L_d); checkCUDAError("Dealocation"); float* Li_dev; hipMalloc((void**) &Li_dev, n * sizeof(float)); checkCUDAError("kernel execution Alloc Li_d"); hipMemcpy(Li_dev, &Li[0], n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Coppy Li_d"); hipMemcpy(A_d, A_TTD, nnzElements * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy A_d"); hipFree(C_idx_d); checkCUDAError("Dealocation"); int* R_idx_d; hipMalloc((void**) &R_idx_d, nnzElements * sizeof(int)); checkCUDAError("kernel execution Alloc R_idx_d"); hipMemcpy(R_idx_d, Row_IDX, nnzElements * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy Ridx_d"); float l1sum = hipblasSasum(n, x_d, 1); float gNorm = hipblasSnrm2(m, redisuals_dev, 1); printf("k=%d,L1 sum part:%1.16f, \|Ax-b\|^2=%1.16f, fval=%1.16f", 0, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); fprintf(fp, "k=%d,L1 sum part:%1.16f, \|Ax-b\|^2=%1.16f, fval=%1.16f", 0, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); print_time_message(&t1, "Idem spustat paralelny RCDM"); parallel_iterations = 0; float parallelExecutionTime; for (int k = 0; k < 24; k++) { t_start = clock(); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < PRCDMExecutionTime) { hipLaunchKernelGGL(( RCDMKernel), dim3(dimGridRCDM2), dim3(dimBlockRCD2) , 0, 0, A_d, R_idx_d, n_d, redisuals_dev, x_d, 
lambda_d, Li_dev, devStates); parallel_iterations += dim1 * dim2; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&parallelExecutionTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("k= %d\n", k); printf("parallel random iterations:%d totalMultiplier %d\n", parallel_iterations, NMAXITERKernel * TOTALTHREDSPERBLOCK); printf("trvanie %f ms , %f iter/sec , speedup:%f \n", parallelExecutionTime, parallel_iterations / parallelExecutionTime * 1000 * NMAXITERKernel * TOTALTHREDSPERBLOCK, TOTALTHREDSPERBLOCK * NMAXITERKernel * parallel_iterations / parallelExecutionTime / serialItPsec); fprintf(fp, "k= %d\n", k); fprintf(fp, "parallel random iterations:%d toitalMultiplies %d\n", parallel_iterations, NMAXITERKernel * TOTALTHREDSPERBLOCK); fprintf(fp, "trvanie %f ms , %f iter/sec , speedup:%f \n", parallelExecutionTime, parallel_iterations / parallelExecutionTime * 1000 * NMAXITERKernel * TOTALTHREDSPERBLOCK, TOTALTHREDSPERBLOCK * NMAXITERKernel * parallel_iterations / parallelExecutionTime / serialItPsec); print_time_message(&t1, "END spustat paralelny RCDM"); hipMemcpy(x, x_d, n * sizeof(float), hipMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); string fileName = ""; fileName = "/tmp/ttd_r_parallel_"; std::ostringstream oss; oss << k; fileName += oss.str(); l1sum = hipblasSasum(n, x_d, 1); gNorm = hipblasSnrm2(m, redisuals_dev, 1); printf("k=%d,L1 sum part:%f, \|Ax-b\|^2=%f, fval=%f", k, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); fprintf(fp, "k=%d,L1 sum part:%f, \|Ax-b\|^2=%f, fval=%f", k, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); saveSolutionIntoFile(fileName.c_str(), x, n, nodeDescription, treshHold); } //------------------------------------------------------ //===================== Nesterov Composite Accelerated float A_nest = 0; float L_nest = L[0]; float mu = 0; float v[n]; float y[n]; float derv[n]; float derv2[n]; float dervHistory[n]; for (int i = 0; i < n; i++) { x[i] = 0; v[i] = 0; derv[i] = 0; dervHistory[i] = 0; } float gamma_u = 2; float gamma_d = gamma_u; float quadSolTmp = 0; parallel_iterations = 0; t_start = clock(); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < executionTime) { int doRepeat = 1; while (doRepeat) { quadSolTmp = 2 * (1 + A_nest * mu) / L_nest; float a = (quadSolTmp + sqrt(quadSolTmp * quadSolTmp + 4 * A_nest * quadSolTmp)) / 2; float convCombA = A_nest / (a + A_nest); float convComba = a / (a + A_nest); for (int i = 0; i < n; i++) { y[i] = convCombA * x[i] + convComba * v[i]; } for (int i = 0; i < n; i++) { derv[i] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for (int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * y[Col_IDX[i] - 1]; } for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } float LLocal = L_nest; float Li = 1 / LLocal; float lambdaOverL = lambda * Li; for (int j = 0; j < n; j++) { float xLocal = x[j]; float alpha = derv[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else t = -xLocal; TVALUES[j] = t; } for (int j = 0; j < n; j++) { derv2[j] = -L_nest * TVALUES[j] - derv[j]; derv[j] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for (int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * (y[Col_IDX[i] - 1] + TVALUES[Col_IDX[i] - 1]); 
} for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } for (int i = 0; i < n; i++) { derv[i] += derv2[i]; } float LS = 0; float RS = 0; for (int i = 0; i < n; i++) { LS -= derv[i] * TVALUES[i]; RS += derv[i] * derv[i]; } RS = RS / L_nest; if (LS < RS) { L_nest = L_nest * gamma_u; doRepeat = 1; } else { doRepeat = 0; A_nest += a; for (int i = 0; i < n; i++) { x[i] = y[i] + TVALUES[i]; } L_nest = L_nest / gamma_d; float ratioA = (A_nest - a) / A_nest; float ratioB = (a) / A_nest; for (int i = 0; i < n; i++) { dervHistory[i] = ratioA * dervHistory[i] + ratioB * (derv[i] - derv2[i]); } float LLocal = 1 / A_nest; float Li = A_nest; float lambdaOverL = lambda * Li; for (int i = 0; i < n; i++) { float alpha = dervHistory[i] * Li; float deltaL = -alpha - lambdaOverL; float deltaR = -alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL; else if (deltaR < 0) t = deltaR; else t = 0; v[i] = t; } } } parallel_iterations++; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&setialExecutionTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("serial AC iterations:%d\n", parallel_iterations); printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, parallel_iterations / setialExecutionTime * 1000); fprintf(fp, "serial AC iterations:%d\n", parallel_iterations); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, parallel_iterations / setialExecutionTime * 1000); saveSolutionIntoFile("/tmp/ttd_ac.txt", x, n, nodeDescription, treshHold); //------------------------------------------------------- hipFree(Li_dev); checkCUDAError("Dealocationa"); hipFree(R_idx_d); checkCUDAError("Dealocationb"); hipFree(redisuals_dev); checkCUDAError("Dealocationc"); hipFree(lambda_d); checkCUDAError("Dealocationd"); hipFree(A_d); checkCUDAError("Dealocatione"); hipFree(n_d); checkCUDAError("Dealocationg"); hipFree(x_d); checkCUDAError("Dealocationh"); hipFree(derivatives_d); checkCUDAError("Dealocationi"); print_time_message(&t1, "Device memory DeAllocated"); fclose(fp); } void oneDayPG(int row, int col, int exampleType, float executionTime, float lambda) { int dim1 = 14; int dim2 = 1; float PRCDMExecutionTime = 1; float PGExecutionTime = 600; float SRCDMExecutionTime = 1; int m, n; clock_t t1; float * A_TTD; int * Row_IDX; int * Col_IDX; int nnzElements; int* nodeDescription; //-------------------------------GET BOUNDARY POINTS int * boundary; int boundarySize = 0; getBoundaryVector(row, col, exampleType, &boundary, &boundarySize); cout << "Boundary size is " << boundarySize << "\n"; //-------------------------------GENERATE PROBLEMS t1 = clock(); generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX, &Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription, ROWSCALE); print_time_message(&t1, "Getting problem dimension"); cout << "Dimension of your problem is " << m << " x " << n << "\n"; printf("Number of NNZ: %d\n", nnzElements); //-------------------------------GET FORCE VECTORS float* g; getForceVector(&g, m, row, col, exampleType); for (int i = 0; i < m; i++) { g[i] = -g[i]; } print_time_message(&t1, "Force vector obtained"); //----------------------------------------- FILE *fp; // fp = fopen("/tmp/asdf", "w"); // // for (int i = 0; i < nnzElements; i++) { // fprintf(fp,"%d,%d,%f\n", Row_IDX[i], Col_IDX[i], A_TTD[i]); // } // fclose(fp); // fp = fopen("/tmp/ttd_vectorB.csv", "w"); // // for (int i = 0; i < m; i++) { // fprintf(fp,"%d,%f\n", i,-g[i]); // } // fclose(fp); // 
print_time_message(&t1, "Data saved"); //----------------------------------------- float x[n]; float L[n]; float Li[n]; for (int i = 0; i < n; i++) { x[i] = 0; L[i] = 0; for (int k = 4 * i; k < 4 * i + 4; k++) { L[i] += A_TTD[k] * A_TTD[k]; } Li[i] = 1 / L[i]; } //-------------------------------Preparing A' in colum orientated int ATcount[m]; int actualCount[m]; int rowCounts[m]; for (int i = 0; i < m; i++) { ATcount[i] = 0; actualCount[i] = 0; } for (int i = 0; i < nnzElements; i++) { ATcount[Row_IDX[i] - 1]++; // Shift from 1-based to 0 based } rowCounts[0] = ATcount[0]; for (int i = 1; i < m; i++) { int tmpCount = ATcount[i]; rowCounts[i] = ATcount[i]; ATcount[i] += ATcount[i - 1]; actualCount[i] = ATcount[i] - tmpCount; } for (int i = 0; i < m; i++) { ATcount[i] = actualCount[i]; } float ATCB[nnzElements]; int ColAT[nnzElements]; for (int i = 0; i < n; i++) { for (int j = 4 * i; j < 4 * i + 4; j++) { int tmprow = Row_IDX[j] - 1; ColAT[actualCount[tmprow]] = i; ATCB[actualCount[tmprow]] = A_TTD[j]; actualCount[tmprow]++; } } fp = fopen("/tmp/ttd_log.txt", "w"); //-------------------------------Inicializacia CuSpare Library /* allocate GPU memory and copy the matrix and vectors into it */ hipsparseStatus_t status; hipsparseHandle_t handle = 0; /* initialize cusparse library */ status = hipsparseCreate(&handle); if (status != HIPSPARSE_STATUS_SUCCESS) { printf("CUSPARSE Library initialization failed"); } //-------------------------------Alokacia device memroies float* A_d; int* C_idx_d; hipMalloc((void**) &A_d, nnzElements * sizeof(float)); checkCUDAError("kernel execution Alloc A_d"); hipMalloc((void**) &C_idx_d, nnzElements * sizeof(int)); checkCUDAError("kernel execution Alloc R_IDX"); int * n_d; hipMalloc((void**) &n_d, 1 * sizeof(int)); checkCUDAError("kernel execution Alloc ..."); float* derivatives_d; hipMalloc((void**) &derivatives_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc g_d"); float* L_d; hipMalloc((void**) &L_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc Li_d"); int* ATcount_d; hipMalloc((void**) &ATcount_d, m * sizeof(int)); checkCUDAError("kernel execution Alloc AtCount_d"); float* x_d; hipMalloc((void**) &x_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc x_d"); float* lambda_d; hipMalloc((void**) &lambda_d, 1 * sizeof(float)); checkCUDAError("kernel execution Alloc lambda_d"); // float* Li_d; // hipMalloc((void**) &Li_d, n * sizeof(float)); // checkCUDAError("kernel execution Alloc Li_d"); print_time_message(&t1, "Device memory allocated"); //-------------------------------Copy data hipMemcpy(A_d, &ATCB[0], nnzElements * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy A_d"); hipMemcpy(C_idx_d, &ColAT[0], nnzElements * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy R"); hipMemcpy(derivatives_d, x, n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); hipMemcpy(x_d, x, n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); hipMemcpy(n_d, &n, 1 * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); hipMemcpy(L_d, L, n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); for (int i = m - 1; i > 0; i--) { actualCount[i] -= actualCount[i - 1]; } hipMemcpy(ATcount_d, actualCount, m * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); print_time_message(&t1, "Data coppied"); 
//-------------------------------Prepare of Derivatives vector on device for (int i = 0; i < m; i++) { if (g[i] != 0) { hipsparseStatus_t copystatus = cusparseSaxpyi(handle, rowCounts[i], g[i], &A_d[ATcount[i]], &C_idx_d[ATcount[i]], derivatives_d, HIPSPARSE_INDEX_BASE_ZERO); if (copystatus != HIPSPARSE_STATUS_SUCCESS) { printf("Nastala chyba!"); } } } print_time_message(&t1, "Derivatives vector on device"); //--------------------------------------------------------------------- //------------------------------Serial Benchmark----------------------- float derivatives[n]; hipMemcpy(&derivatives[0], derivatives_d, n * sizeof(float), hipMemcpyDeviceToHost); print_time_message(&t1, "Initial Shrink Start"); int max_IDX = 0; float max_VAL = 0; float optimalStep = 0; float TVALUES[n]; float EVALUES[n]; for (int j = 0; j < n; j++) { float LLocal = L[j]; float Li = 1 / LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; float mlt2 = 1; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else { t = -xLocal; mlt2 = 0; } float tmpEnergy = t * t * LLocal; TVALUES[j] = t; // EVALUES[j] = tmpEnergy / 2;//+ lambda*x[j]*( signbit(x[j]) -signbit(x[j]+TVALUES[j]) ); EVALUES[j] = mlt2 * (tmpEnergy / 2 + lambda * xLocal * (signbit(xLocal) - signbit(xLocal + t))) + (1 - mlt2) * (lambda * xLocal * signbit(xLocal) - derivatives[j] * t - LLocal * t * t / 2); if (tmpEnergy > max_VAL) { optimalStep = t; max_VAL = tmpEnergy; max_IDX = j; } } print_time_message(&t1, "Initial Shrink End"); print_time_message(&t1, "Start serial Code"); float setialExecutionTime = 0; long long setialItetaions = 0; clock_t t_start; t_start = clock(); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < executionTime) { // printf("optimal t_idx=%d, tval=%f, elapsedTime:%f\n", max_IDX, TVALUES[max_IDX],getTotalElapsetTime(&t_start)); // Update x[max_IDX] += TVALUES[max_IDX]; optimalStep = TVALUES[max_IDX]; for (int ki = max_IDX * 4; ki < max_IDX * 4 + 4; ki++) { for (int jj = ATcount[Row_IDX[ki] - 1]; jj < ATcount[Row_IDX[ki] - 1] + rowCounts[Row_IDX[ki] - 1]; jj++) { int j = ColAT[jj]; derivatives[j] += optimalStep * A_TTD[ki] * ATCB[jj]; float LLocal = L[j]; float Li = 1 / LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; float mlt2 = 1; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else { t = -xLocal; mlt2 = 0; } float tmpEnergy = t * t * LLocal; TVALUES[j] = t; // EVALUES[j] = tmpEnergy / 2;//+ lambda*x[j]*( signbit(x[j]) -signbit(x[j]+TVALUES[j]) ); EVALUES[j] = mlt2 * (tmpEnergy / 2 + lambda * xLocal * (signbit(xLocal) - signbit(xLocal + t))) + (1 - mlt2) * (lambda * xLocal * signbit(xLocal) - derivatives[j] * t - LLocal * t * t / 2); } } max_VAL = 0; max_IDX = 0; for (int j = 0; j < n; j++) { if (EVALUES[j] > max_VAL) { max_VAL = EVALUES[j]; max_IDX = j; } } setialItetaions++; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&setialExecutionTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); float setialIterationPerSec = setialItetaions / setialExecutionTime * 1000; printf("Serial execution time:%f ms = %f it/sec \n", setialExecutionTime, setialIterationPerSec); 
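// Serial greedy benchmark above (Gauss-Southwell style coordinate descent): each iteration
// applies the soft-thresholding step TVALUES[j] of the coordinate with the largest estimated
// decrease EVALUES[j], refreshes derivatives/TVALUES/EVALUES only for columns that share one
// of the four rows touched by the update, and then rescans all n coordinates for the next
// maximum. The measured setialIterationPerSec is the baseline for the parallel speedups
// reported further below. (setialItetaions is a long long, so the %d specifiers used for it
// in the surrounding printf/fprintf calls are technically mismatched.)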
fprintf(fp, "Serial execution time:%f ms = %f it/sec \n", setialExecutionTime, setialIterationPerSec); printf("serial greedy iterations:%d\n", setialItetaions); fprintf(fp, "serial greedy iterations:%d\n", setialItetaions); saveSolutionIntoFile("/tmp/ttd_g_serial.txt", x, n, nodeDescription, 0); //-------------------------------Computation float* T_dev; float* E_dev; hipblasAlloc(n, sizeof(float), (void**) &T_dev); checkCUDAError("Alokacia T_dev"); hipblasAlloc(n, sizeof(float), (void**) &E_dev); checkCUDAError("Alokacia E_dev"); float time; hipsparseStatus_t copystatus; hipMemcpy(lambda_d, &lambda, 1 * sizeof(float), hipMemcpyHostToDevice); dim3 dimBlock( TOTALTHREDSPERBLOCK); dim3 dimGridRCDM( 1, 1+n/(TOTALTHREDSPERBLOCK)); float tPoint[1]; int maxIndex = 10; hipLaunchKernelGGL(( ShrinkKernel), dim3(dimGridRCDM) ,dim3(dimBlock) , 0, 0, T_dev, E_dev, derivatives_d, L_d,x_d, lambda, n); int maxShrinkSubset = 0; for (int i = 0; i < m; i++) { if (rowCounts[i] > maxShrinkSubset) maxShrinkSubset = rowCounts[i]; } printf("Max shrink subset %d\n", maxShrinkSubset); dim3 dimGridShrinkSubset( 1, 1+maxShrinkSubset/(TOTALTHREDSPERBLOCK)); long long parallel_iterations = 0; float treshHold = 0; for (int k = 0; k < 114; k++) { hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); t_start = clock(); while (getTotalElapsetTime(&t_start) < PGExecutionTime) { for (int asdf = 0; asdf < 100; asdf++) { maxIndex = hipblasIsamax(n, E_dev, 1); maxIndex = maxIndex - 1; // printf("%d;Selected index:%d\n", i, maxIndex); hipLaunchKernelGGL(( IncreaseElementAndShrink), dim3(1) ,dim3(1) , 0, 0, maxIndex,T_dev, E_dev, derivatives_d, L_d,x_d, lambda, n ); // hipLaunchKernelGGL(( IncreaseElement), dim3(1) ,dim3(1) , 0, 0, x_d, maxIndex, T_dev); checkCUDAError("IncreaseElement"); hipblasGetVector(1, sizeof(float), &T_dev[maxIndex], 1, tPoint, 1); checkCUDAError("get T"); // printf("optimal t_idx=%d and value = %f\n", maxIndex, tPoint[0]); for (int ki = maxIndex * 4; ki < maxIndex * 4 + 4; ki++) { // printf("pridat %f %f %d\n", tPoint[0], A_TTD[ki],Row_IDX[ki]-1); copystatus = cusparseSaxpyi(handle, rowCounts[Row_IDX[ki] - 1], tPoint[0] * A_TTD[ki], &A_d[ATcount[Row_IDX[ki] - 1]], &C_idx_d[ATcount[Row_IDX[ki] - 1]], derivatives_d, HIPSPARSE_INDEX_BASE_ZERO); if (copystatus != HIPSPARSE_STATUS_SUCCESS) printf("Nastala chyba pri CuSparse!\n"); // hipDeviceSynchronize(); hipLaunchKernelGGL(( ShrinkKernelSubset), dim3(dimGridShrinkSubset) ,dim3(dimBlock) , 0, 0, T_dev, E_dev, derivatives_d, L_d, x_d, lambda, &ATcount_d[Row_IDX[ki] - 1], &C_idx_d[ATcount[Row_IDX[ki] - 1]]); // checkCUDAError("Shrink subset"); } // hipLaunchKernelGGL(( ShrinkKernel), dim3(dimGridRCDM) ,dim3(dimBlock) , 0, 0, T_dev, E_dev, derivatives_d, L_d,x_d, // lambda, n); parallel_iterations++; } } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("pridane %f \n", tPoint[0]); printf("trvanie %f ms , %f iter/sec speedUp:%f \n", time, parallel_iterations / time * 1000, parallel_iterations / time * 1000 / setialIterationPerSec); printf("parallel greedy iterations:%d\n", parallel_iterations); fprintf(fp, "trvanie %f ms , %f iter/sec speedUp:%f \n", time, parallel_iterations / time * 1000, parallel_iterations / time * 1000 / setialIterationPerSec); fprintf(fp, "parallel greedy iterations:%d\n", parallel_iterations); hipMemcpy(x, x_d, n * sizeof(float), hipMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); string fileName = ""; 
fileName = "/tmp/ttd_g_parallel_"; std::ostringstream oss; oss << k; fileName += oss.str(); // printf("k=%d,L1 sum part:%f, \|Ax-b\|^2=%f, fval=%f", k, l1sum, gNorm // * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); // fprintf(fp, "k=%d,L1 sum part:%f, \|Ax-b\|^2=%f, fval=%f", k, l1sum, // gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); saveSolutionIntoFile(fileName.c_str(), x, n, nodeDescription, treshHold); } print_time_message(&t1, "Computation"); //-------------------------------DeAllocation hipFree(ATcount_d); print_time_message(&t1, "Device memory DeAllocated"); float residuals[m]; float b[m]; float* redisuals_dev; for (int i = 0; i < m; i++) { residuals[i] = g[i]; b[i] = -g[i]; } for (int i = 0; i < n; i++) x[i] = 0; hipMalloc((void**) &redisuals_dev, m * sizeof(float)); checkCUDAError("kernel execution Alloc residuals_d"); hipMemcpy(redisuals_dev, &residuals[0], m * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy residuals_d"); hipMemcpy(x_d, &x[0], n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy x"); setialItetaions = 0; t_start = clock(); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < SRCDMExecutionTime) { float tmp = (float) rand() / RAND_MAX; int idx = (int) (n * tmp); tmp = 0; for (int j = idx * 4; j < idx * 4 + 4; j++) { //printf("A=%f, res=%f,colid=%d\n",A_TTD[j],residuals[Col_IDX[j]],Col_IDX[j]); tmp += A_TTD[j] * residuals[Row_IDX[j] - 1]; } float tmp1 = Li[idx] * (tmp + lambda); if (x[idx] > tmp1) { tmp = -tmp1; } else { tmp1 = Li[idx] * (tmp - lambda); if (x[idx] < tmp1) { tmp = -tmp1; } else { tmp = -x[idx]; } } x[idx] += tmp; // printf("ID:%d, value%f\n",idx, x[idx]); for (int j = idx * 4; j < idx * 4 + 4; j++) { residuals[Row_IDX[j] - 1] += tmp * A_TTD[j]; } setialItetaions++; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&setialExecutionTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); float serialItPsec = (float) setialItetaions / setialExecutionTime; printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); printf("serial random iterations:%d\n", setialItetaions); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); fprintf(fp, "serial random iterations:%d\n", setialItetaions); saveSolutionIntoFile("/tmp/ttd_r_serial.txt", x, n, nodeDescription, 0); //-------------------------------------------------- // ============ Parallel Random dim3 dimBlockRCD2( TOTALTHREDSPERBLOCK); dim3 dimGridRCDM2( dim1, dim2); hiprandState_t *devStates; hipMalloc((void **) &devStates, dim1 * dim2 * TOTALTHREDSPERBLOCK * sizeof(hiprandState_t)); hipLaunchKernelGGL(( setup_kernel), dim3(dimGridRCDM2), dim3(dimBlockRCD2) , 0, 0, devStates); checkCUDAError("Inicializacia ranom states"); hipFree(L_d); checkCUDAError("Dealocation"); float* Li_dev; hipMalloc((void**) &Li_dev, n * sizeof(float)); checkCUDAError("kernel execution Alloc Li_d"); hipMemcpy(Li_dev, &Li[0], n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Coppy Li_d"); hipMemcpy(A_d, A_TTD, nnzElements * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy A_d"); hipFree(C_idx_d); checkCUDAError("Dealocation"); int* R_idx_d; hipMalloc((void**) &R_idx_d, nnzElements * sizeof(int)); checkCUDAError("kernel execution Alloc R_idx_d"); hipMemcpy(R_idx_d, Row_IDX, nnzElements * sizeof(int), 
hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy Ridx_d"); float l1sum = hipblasSasum(n, x_d, 1); float gNorm = hipblasSnrm2(m, redisuals_dev, 1); printf("k=%d,L1 sum part:%1.16f, \|Ax-b\|^2=%1.16f, fval=%1.16f", 0, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); fprintf(fp, "k=%d,L1 sum part:%1.16f, \|Ax-b\|^2=%1.16f, fval=%1.16f", 0, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); print_time_message(&t1, "Idem spustat paralelny RCDM"); parallel_iterations = 0; float parallelExecutionTime; for (int k = 0; k < 0; k++) { t_start = clock(); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < PRCDMExecutionTime) { hipLaunchKernelGGL(( RCDMKernel), dim3(dimGridRCDM2), dim3(dimBlockRCD2) , 0, 0, A_d, R_idx_d, n_d, redisuals_dev, x_d, lambda_d, Li_dev, devStates); parallel_iterations += dim1 * dim2; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&parallelExecutionTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("k= %d\n", k); printf("parallel random iterations:%d totalMultiplier %d\n", parallel_iterations, NMAXITERKernel * TOTALTHREDSPERBLOCK); printf("trvanie %f ms , %f iter/sec , speedup:%f \n", parallelExecutionTime, parallel_iterations / parallelExecutionTime * 1000 * NMAXITERKernel * TOTALTHREDSPERBLOCK, TOTALTHREDSPERBLOCK * NMAXITERKernel * parallel_iterations / parallelExecutionTime / serialItPsec); fprintf(fp, "k= %d\n", k); fprintf(fp, "parallel random iterations:%d toitalMultiplies %d\n", parallel_iterations, NMAXITERKernel * TOTALTHREDSPERBLOCK); fprintf(fp, "trvanie %f ms , %f iter/sec , speedup:%f \n", parallelExecutionTime, parallel_iterations / parallelExecutionTime * 1000 * NMAXITERKernel * TOTALTHREDSPERBLOCK, TOTALTHREDSPERBLOCK * NMAXITERKernel * parallel_iterations / parallelExecutionTime / serialItPsec); print_time_message(&t1, "END spustat paralelny RCDM"); hipMemcpy(x, x_d, n * sizeof(float), hipMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); string fileName = ""; fileName = "/tmp/ttd_r_parallel_"; std::ostringstream oss; oss << k; fileName += oss.str(); l1sum = hipblasSasum(n, x_d, 1); gNorm = hipblasSnrm2(m, redisuals_dev, 1); printf("k=%d,L1 sum part:%f, \|Ax-b\|^2=%f, fval=%f", k, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); fprintf(fp, "k=%d,L1 sum part:%f, \|Ax-b\|^2=%f, fval=%f", k, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); saveSolutionIntoFile(fileName.c_str(), x, n, nodeDescription, treshHold); } //------------------------------------------------------ //===================== Nesterov Composite Accelerated float A_nest = 0; float L_nest = L[0]; float mu = 0; float v[n]; float y[n]; float derv[n]; float derv2[n]; float dervHistory[n]; for (int i = 0; i < n; i++) { x[i] = 0; v[i] = 0; derv[i] = 0; dervHistory[i] = 0; } float gamma_u = 2; float gamma_d = gamma_u; float quadSolTmp = 0; parallel_iterations = 0; t_start = clock(); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < executionTime) { int doRepeat = 1; while (doRepeat) { quadSolTmp = 2 * (1 + A_nest * mu) / L_nest; float a = (quadSolTmp + sqrt(quadSolTmp * quadSolTmp + 4 * A_nest * quadSolTmp)) / 2; float convCombA = A_nest / (a + A_nest); float convComba = a / (a + A_nest); for (int i = 0; i < n; i++) { y[i] = convCombA * x[i] + convComba * v[i]; } for (int i = 0; i < n; i++) { derv[i] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for 
(int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * y[Col_IDX[i] - 1]; } for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } float LLocal = L_nest; float Li = 1 / LLocal; float lambdaOverL = lambda * Li; for (int j = 0; j < n; j++) { float xLocal = x[j]; float alpha = derv[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else t = -xLocal; TVALUES[j] = t; } for (int j = 0; j < n; j++) { derv2[j] = -L_nest * TVALUES[j] - derv[j]; derv[j] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for (int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * (y[Col_IDX[i] - 1] + TVALUES[Col_IDX[i] - 1]); } for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } for (int i = 0; i < n; i++) { derv[i] += derv2[i]; } float LS = 0; float RS = 0; for (int i = 0; i < n; i++) { LS -= derv[i] * TVALUES[i]; RS += derv[i] * derv[i]; } RS = RS / L_nest; if (LS < RS) { L_nest = L_nest * gamma_u; doRepeat = 1; } else { doRepeat = 0; A_nest += a; for (int i = 0; i < n; i++) { x[i] = y[i] + TVALUES[i]; } L_nest = L_nest / gamma_d; float ratioA = (A_nest - a) / A_nest; float ratioB = (a) / A_nest; for (int i = 0; i < n; i++) { dervHistory[i] = ratioA * dervHistory[i] + ratioB * (derv[i] - derv2[i]); } float LLocal = 1 / A_nest; float Li = A_nest; float lambdaOverL = lambda * Li; for (int i = 0; i < n; i++) { float alpha = dervHistory[i] * Li; float deltaL = -alpha - lambdaOverL; float deltaR = -alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL; else if (deltaR < 0) t = deltaR; else t = 0; v[i] = t; } } } parallel_iterations++; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&setialExecutionTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("serial AC iterations:%d\n", parallel_iterations); printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, parallel_iterations / setialExecutionTime * 1000); fprintf(fp, "serial AC iterations:%d\n", parallel_iterations); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, parallel_iterations / setialExecutionTime * 1000); saveSolutionIntoFile("/tmp/ttd_ac.txt", x, n, nodeDescription, treshHold); //------------------------------------------------------- hipFree(Li_dev); checkCUDAError("Dealocationa"); hipFree(R_idx_d); checkCUDAError("Dealocationb"); hipFree(redisuals_dev); checkCUDAError("Dealocationc"); hipFree(lambda_d); checkCUDAError("Dealocationd"); hipFree(A_d); checkCUDAError("Dealocatione"); hipFree(n_d); checkCUDAError("Dealocationg"); hipFree(x_d); checkCUDAError("Dealocationh"); hipFree(derivatives_d); checkCUDAError("Dealocationi"); print_time_message(&t1, "Device memory DeAllocated"); fclose(fp); } void niceAC(int row, int col, int exampleType, float executionTime, float lambda) { int dim1 = 14; int dim2 = 1; float PRCDMExecutionTime = 0; float SRCDMExecutionTime = PRCDMExecutionTime; float setialExecutionTime = 0; int m, n; clock_t t1; float * A_TTD; int * Row_IDX; int * Col_IDX; int nnzElements; int* nodeDescription; clock_t t_start; //-------------------------------GET BOUNDARY POINTS int * boundary; int boundarySize = 0; getBoundaryVector(row, col, exampleType, &boundary, &boundarySize); cout << "Boundary size is " << boundarySize << "\n"; //-------------------------------GENERATE PROBLEMS t1 = clock(); 
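// niceAC (continued below): generates the same TTD problem on the host; the serial RCDM
// warm-up loop effectively runs zero iterations here because SRCDMExecutionTime is 0, and it
// then runs Nesterov's accelerated composite gradient method with a backtracking estimate of
// L_nest (multiplied by gamma_u when the line-search test fails, divided by gamma_d on
// success) until executionTime elapses, logging to /tmp/ttd_log.txt and saving the solution
// to /tmp/ttd_ac_big.txt.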
generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX, &Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription, ROWSCALE); print_time_message(&t1, "Getting problem dimension"); cout << "Dimension of your problem is " << m << " x " << n << "\n"; printf("Number of NNZ: %d\n", nnzElements); //-------------------------------GET FORCE VECTORS float* g; float b[m]; getForceVector(&g, m, row, col, exampleType); for (int i = 0; i < m; i++) { b[i] = g[i]; g[i] = -g[i]; } print_time_message(&t1, "Force vector obtained"); //----------------------------------------- FILE *fp; // fp = fopen("/tmp/asdf", "w"); // // for (int i = 0; i < nnzElements; i++) { // fprintf(fp,"%d,%d,%f\n", Row_IDX[i], Col_IDX[i], A_TTD[i]); // } // fclose(fp); // fp = fopen("/tmp/ttd_vectorB.csv", "w"); // // for (int i = 0; i < m; i++) { // fprintf(fp,"%d,%f\n", i,-g[i]); // } // fclose(fp); // print_time_message(&t1, "Data saved"); //----------------------------------------- float x[n]; float L[n]; float Li[n]; for (int i = 0; i < n; i++) { x[i] = 0; L[i] = 0; for (int k = 4 * i; k < 4 * i + 4; k++) { L[i] += A_TTD[k] * A_TTD[k]; } Li[i] = 1 / L[i]; } //-------------------------------Preparing A' in colum orientated int ATcount[m]; int actualCount[m]; int rowCounts[m]; for (int i = 0; i < m; i++) { ATcount[i] = 0; actualCount[i] = 0; } for (int i = 0; i < nnzElements; i++) { ATcount[Row_IDX[i] - 1]++; // Shift from 1-based to 0 based } rowCounts[0] = ATcount[0]; for (int i = 1; i < m; i++) { int tmpCount = ATcount[i]; rowCounts[i] = ATcount[i]; ATcount[i] += ATcount[i - 1]; actualCount[i] = ATcount[i] - tmpCount; } for (int i = 0; i < m; i++) { ATcount[i] = actualCount[i]; } float ATCB[nnzElements]; int ColAT[nnzElements]; for (int i = 0; i < n; i++) { for (int j = 4 * i; j < 4 * i + 4; j++) { int tmprow = Row_IDX[j] - 1; ColAT[actualCount[tmprow]] = i; ATCB[actualCount[tmprow]] = A_TTD[j]; actualCount[tmprow]++; } } fp = fopen("/tmp/ttd_log.txt", "w"); //-------------------------------Inicializacia CuSpare Library /* allocate GPU memory and copy the matrix and vectors into it */ float residuals[m]; float* redisuals_dev; for (int i = 0; i < m; i++) { residuals[i] = g[i]; } for (int i = 0; i < n; i++) x[i] = 0; long long setialItetaions = 0; t_start = clock(); while (getTotalElapsetTime(&t_start) < SRCDMExecutionTime) { float tmp = (float) rand() / RAND_MAX; int idx = (int) (n * tmp); tmp = 0; for (int j = idx * 4; j < idx * 4 + 4; j++) { //printf("A=%f, res=%f,colid=%d\n",A_TTD[j],residuals[Col_IDX[j]],Col_IDX[j]); tmp += A_TTD[j] * residuals[Row_IDX[j] - 1]; } float tmp1 = Li[idx] * (tmp + lambda); if (x[idx] > tmp1) { tmp = -tmp1; } else { tmp1 = Li[idx] * (tmp - lambda); if (x[idx] < tmp1) { tmp = -tmp1; } else { tmp = -x[idx]; } } x[idx] += tmp; // printf("ID:%d, value%f\n",idx, x[idx]); for (int j = idx * 4; j < idx * 4 + 4; j++) { residuals[Row_IDX[j] - 1] += tmp * A_TTD[j]; } setialItetaions++; } float serialItPsec = (float) setialItetaions / setialExecutionTime; printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); printf("serial random iterations:%d\n", setialItetaions); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); fprintf(fp, "serial random iterations:%d\n", setialItetaions); saveSolutionIntoFile("/tmp/ttd_r_serial.txt", x, n, nodeDescription, 0); //===================== Nesterov Composite Accelerated float A_nest = 0; float L_nest = L[0]; float mu = 0; float v[n]; float 
y[n]; float derv[n]; float derv2[n]; float dervHistory[n]; for (int i = 0; i < n; i++) { x[i] = 0; v[i] = 0; derv[i] = 0; dervHistory[i] = 0; } float gamma_u = 2; float gamma_d = gamma_u; float quadSolTmp = 0; float TVALUES[n]; float treshHold = 0; int parallel_iterations = 0; t_start = clock(); while (getTotalElapsetTime(&t_start) < executionTime) { int doRepeat = 1; while (doRepeat) { quadSolTmp = 2 * (1 + A_nest * mu) / L_nest; float a = (quadSolTmp + sqrt(quadSolTmp * quadSolTmp + 4 * A_nest * quadSolTmp)) / 2; float convCombA = A_nest / (a + A_nest); float convComba = a / (a + A_nest); for (int i = 0; i < n; i++) { y[i] = convCombA * x[i] + convComba * v[i]; } for (int i = 0; i < n; i++) { derv[i] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for (int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * y[Col_IDX[i] - 1]; } for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } float LLocal = L_nest; float Li = 1 / LLocal; float lambdaOverL = lambda * Li; for (int j = 0; j < n; j++) { float xLocal = x[j]; float alpha = derv[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else t = -xLocal; TVALUES[j] = t; } for (int j = 0; j < n; j++) { derv2[j] = -L_nest * TVALUES[j] - derv[j]; derv[j] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for (int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * (y[Col_IDX[i] - 1] + TVALUES[Col_IDX[i] - 1]); } for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } for (int i = 0; i < n; i++) { derv[i] += derv2[i]; } float LS = 0; float RS = 0; for (int i = 0; i < n; i++) { LS -= derv[i] * TVALUES[i]; RS += derv[i] * derv[i]; } RS = RS / L_nest; if (LS < RS) { L_nest = L_nest * gamma_u; doRepeat = 1; } else { doRepeat = 0; A_nest += a; for (int i = 0; i < n; i++) { x[i] = y[i] + TVALUES[i]; } L_nest = L_nest / gamma_d; float ratioA = (A_nest - a) / A_nest; float ratioB = (a) / A_nest; for (int i = 0; i < n; i++) { dervHistory[i] = ratioA * dervHistory[i] + ratioB * (derv[i] - derv2[i]); } float LLocal = 1 / A_nest; float Li = A_nest; float lambdaOverL = lambda * Li; for (int i = 0; i < n; i++) { float alpha = dervHistory[i] * Li; float deltaL = -alpha - lambdaOverL; float deltaR = -alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL; else if (deltaR < 0) t = deltaR; else t = 0; v[i] = t; } } } parallel_iterations++; } printf("serial AC iterations:%d\n", parallel_iterations); setialExecutionTime = getElapsetTime(&t_start); fprintf(fp, "serial AC iterations:%d\n", parallel_iterations); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, parallel_iterations / setialExecutionTime * 1000); saveSolutionIntoFile("/tmp/ttd_ac_big.txt", x, n, nodeDescription, treshHold); //------------------------------------------------------- print_time_message(&t1, "Device memory DeAllocated"); fclose(fp); } void niceRCDM(int row, int col, int exampleType, float executionTime, float lambda) { int dim1 = 14; int dim2 = 1; float PRCDMExecutionTime = executionTime; float SRCDMExecutionTime = PRCDMExecutionTime; float setialExecutionTime = 0; int m, n; clock_t t1; float * A_TTD; int * Row_IDX; int * Col_IDX; int nnzElements; int* nodeDescription; clock_t t_start; //-------------------------------GET BOUNDARY POINTS int * boundary; int boundarySize = 0; getBoundaryVector(row, col, 
exampleType, &boundary, &boundarySize); cout << "Boundary size is " << boundarySize << "\n"; //-------------------------------GENERATE PROBLEMS t1 = clock(); generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX, &Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription, ROWSCALE); print_time_message(&t1, "Getting problem dimension"); cout << "Dimension of your problem is " << m << " x " << n << "\n"; printf("Number of NNZ: %d\n", nnzElements); //-------------------------------GET FORCE VECTORS float* g; float b[m]; getForceVector(&g, m, row, col, exampleType); for (int i = 0; i < m; i++) { b[i] = g[i]; g[i] = -g[i]; } print_time_message(&t1, "Force vector obtained"); //----------------------------------------- FILE *fp; // fp = fopen("/tmp/asdf", "w"); // // for (int i = 0; i < nnzElements; i++) { // fprintf(fp,"%d,%d,%f\n", Row_IDX[i], Col_IDX[i], A_TTD[i]); // } // fclose(fp); // fp = fopen("/tmp/ttd_vectorB.csv", "w"); // // for (int i = 0; i < m; i++) { // fprintf(fp,"%d,%f\n", i,-g[i]); // } // fclose(fp); // print_time_message(&t1, "Data saved"); //----------------------------------------- float x[n]; float L[n]; float Li[n]; for (int i = 0; i < n; i++) { x[i] = 0; L[i] = 0; for (int k = 4 * i; k < 4 * i + 4; k++) { L[i] += A_TTD[k] * A_TTD[k]; } Li[i] = 1 / L[i]; } //-------------------------------Preparing A' in colum orientated int ATcount[m]; int actualCount[m]; int rowCounts[m]; for (int i = 0; i < m; i++) { ATcount[i] = 0; actualCount[i] = 0; } for (int i = 0; i < nnzElements; i++) { ATcount[Row_IDX[i] - 1]++; // Shift from 1-based to 0 based } rowCounts[0] = ATcount[0]; for (int i = 1; i < m; i++) { int tmpCount = ATcount[i]; rowCounts[i] = ATcount[i]; ATcount[i] += ATcount[i - 1]; actualCount[i] = ATcount[i] - tmpCount; } for (int i = 0; i < m; i++) { ATcount[i] = actualCount[i]; } float ATCB[nnzElements]; int ColAT[nnzElements]; for (int i = 0; i < n; i++) { for (int j = 4 * i; j < 4 * i + 4; j++) { int tmprow = Row_IDX[j] - 1; ColAT[actualCount[tmprow]] = i; ATCB[actualCount[tmprow]] = A_TTD[j]; actualCount[tmprow]++; } } fp = fopen("/tmp/ttd_log.txt", "w"); //-------------------------------Inicializacia CuSpare Library /* allocate GPU memory and copy the matrix and vectors into it */ float residuals[m]; float* redisuals_dev; for (int i = 0; i < m; i++) { residuals[i] = g[i]; } for (int i = 0; i < n; i++) x[i] = 0; long long setialItetaions = 0; t_start = clock(); while (getTotalElapsetTime(&t_start) < SRCDMExecutionTime) { float tmp = (float) rand() / RAND_MAX; int idx = (int) (n * tmp); tmp = 0; for (int j = idx * 4; j < idx * 4 + 4; j++) { //printf("A=%f, res=%f,colid=%d\n",A_TTD[j],residuals[Col_IDX[j]],Col_IDX[j]); tmp += A_TTD[j] * residuals[Row_IDX[j] - 1]; } float tmp1 = Li[idx] * (tmp + lambda); if (x[idx] > tmp1) { tmp = -tmp1; } else { tmp1 = Li[idx] * (tmp - lambda); if (x[idx] < tmp1) { tmp = -tmp1; } else { tmp = -x[idx]; } } x[idx] += tmp; // printf("ID:%d, value%f\n",idx, x[idx]); for (int j = idx * 4; j < idx * 4 + 4; j++) { residuals[Row_IDX[j] - 1] += tmp * A_TTD[j]; } setialItetaions++; } float serialItPsec = (float) setialItetaions / setialExecutionTime; printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); printf("serial random iterations:%d\n", setialItetaions); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); fprintf(fp, "serial random iterations:%d\n", setialItetaions); 
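// Serial randomized coordinate descent above: each iteration draws idx uniformly at random,
// forms the partial derivative d = A_(:,idx)' * residuals, and applies the proximal
// (soft-thresholding) step x[idx] <- shrink(x[idx] - d/L[idx], lambda/L[idx]), i.e. the step
// is -Li*(d + lambda) if x > Li*(d + lambda), -Li*(d - lambda) if x < Li*(d - lambda), and
// -x otherwise; the four affected residuals are then updated in place. Note that in this
// function setialExecutionTime is never updated before the division above, so serialItPsec
// and the printed iterations-per-second are computed from a zero elapsed time.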
saveSolutionIntoFile("/tmp/ttd_r_serial_big.txt", x, n, nodeDescription, 0); fclose(fp); } int main(void) { // timeComparison(50, 50, 7, 10000, 0.0001;); // timeComparison(100, 100, 2, 10000, 0.00001); timeComparison(100, 100, 8, 10000, 0.001); // oneDayPG(60, 60, 2, 1, 0.0001); // niceRCDM(200, 200, 2, 80000, 0.0001); // niceAC(200, 200, 2, 80000, 0.0001); // niceProblem(200, 200, 2, 100000, 0.0001); // greedyTTD(); // calculateTTDProblem(); return 1; } void greedyTTD() { int col, row, exampleType; cout << "Enter number of columns: "; col = 80; cin >> col; cout << "Enter number of rows: "; row = 80; cin >> row; // cout << "Enter example type: "; // cin >> exampleType; exampleType = 7; int m, n; clock_t t1;//, t2; float * A_TTD; int * Row_IDX; int * Col_IDX; int nnzElements; int* nodeDescription; //-------------------------------GET BOUNDARY POINTS int * boundary; int boundarySize = 0; getBoundaryVector(row, col, exampleType, &boundary, &boundarySize); cout << "Boundary size is " << boundarySize << "\n"; //-------------------------------GENERATE PROBLEMS t1 = clock(); generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX, &Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription, ROWSCALE); print_time_message(&t1, "Getting problem dimension"); cout << "Dimension of your problem is " << m << " x " << n << "\n"; printf("Number of NNZ: %d\n", nnzElements); //-------------------------------GET FORCE VECTORS float* g; getForceVector(&g, m, row, col, exampleType); for (int i = 0; i < m; i++) { g[i] = -g[i]; } print_time_message(&t1, "Force vector obtained"); float x[n]; float L[n]; for (int i = 0; i < n; i++) { x[i] = 0; L[i] = 0; for (int k = 4 * i; k < 4 * i + 4; k++) { L[i] += A_TTD[k] * A_TTD[k]; } } //-------------------------------Inicializacia CuSpare Library /* allocate GPU memory and copy the matrix and vectors into it */ hipsparseStatus_t status; hipsparseHandle_t handle = 0; /* initialize cusparse library */ status = hipsparseCreate(&handle); if (status != HIPSPARSE_STATUS_SUCCESS) { printf("CUSPARSE Library initialization failed"); } //-------------------------------Preparing A' in colum orientated int ATcount[m]; int actualCount[m]; int rowCounts[m]; for (int i = 0; i < m; i++) { ATcount[i] = 0; actualCount[i] = 0; } for (int i = 0; i < nnzElements; i++) { ATcount[Row_IDX[i] - 1]++; // Shift from 1-based to 0 based } rowCounts[0] = ATcount[0]; for (int i = 1; i < m; i++) { int tmpCount = ATcount[i]; rowCounts[i] = ATcount[i]; ATcount[i] += ATcount[i - 1]; actualCount[i] = ATcount[i] - tmpCount; } for (int i = 0; i < m; i++) { ATcount[i] = actualCount[i]; } float ATCB[nnzElements]; int ColAT[nnzElements]; for (int i = 0; i < n; i++) { for (int j = 4 * i; j < 4 * i + 4; j++) { int tmprow = Row_IDX[j] - 1; ColAT[actualCount[tmprow]] = i; ATCB[actualCount[tmprow]] = A_TTD[j]; actualCount[tmprow]++; } } //-------------------------------Alokacia device memroies float* A_d; int* C_idx_d; hipMalloc((void**) &A_d, nnzElements * sizeof(float)); checkCUDAError("kernel execution Alloc A_d"); hipMalloc((void**) &C_idx_d, nnzElements * sizeof(int)); checkCUDAError("kernel execution Alloc R_IDX"); int * n_d; hipMalloc((void**) &n_d, 1 * sizeof(int)); checkCUDAError("kernel execution Alloc ..."); float* derivatives_d; hipMalloc((void**) &derivatives_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc g_d"); float* L_d; hipMalloc((void**) &L_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc Li_d"); int* ATcount_d; hipMalloc((void**) &ATcount_d, m * 
sizeof(int)); checkCUDAError("kernel execution Alloc AtCount_d"); float* x_d; hipMalloc((void**) &x_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc x_d"); float* lambda_d; hipMalloc((void**) &lambda_d, 1 * sizeof(float)); checkCUDAError("kernel execution Alloc lambda_d"); // float* Li_d; // hipMalloc((void**) &Li_d, n * sizeof(float)); // checkCUDAError("kernel execution Alloc Li_d"); print_time_message(&t1, "Device memory allocated"); //-------------------------------Copy data hipMemcpy(A_d, &ATCB[0], nnzElements * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy A_d"); hipMemcpy(C_idx_d, &ColAT[0], nnzElements * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("kernel execution Copy R"); hipMemcpy(derivatives_d, x, n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); hipMemcpy(x_d, x, n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); hipMemcpy(n_d, &n, 1 * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); hipMemcpy(L_d, L, n * sizeof(float), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); for (int i = m - 1; i > 0; i--) { actualCount[i] -= actualCount[i - 1]; } // for (int i=0;i<m;i++) //{ // printf("AT count[%d]=%d\n",i,actualCount[i]); //} hipMemcpy(ATcount_d, actualCount, m * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); print_time_message(&t1, "Data coppied"); //-------------------------------Prepare of Derivatives vector on device for (int i = 0; i < m; i++) { if (g[i] != 0) { hipsparseStatus_t copystatus = cusparseSaxpyi(handle, rowCounts[i], g[i], &A_d[ATcount[i]], &C_idx_d[ATcount[i]], derivatives_d, HIPSPARSE_INDEX_BASE_ZERO); if (copystatus != HIPSPARSE_STATUS_SUCCESS) { printf("Nastala chyba!"); } } } print_time_message(&t1, "Derivatives vector on device"); //--------------------------------------------------------------------- float lambda = 0.0001; //------------------------------Serial Benchmark----------------------- float derivatives[n]; hipMemcpy(&derivatives[0], derivatives_d, n * sizeof(float), hipMemcpyDeviceToHost); print_time_message(&t1, "Initial Shrink Start"); int max_IDX = 0; float max_VAL = 0; float optimalStep = 0; float TVALUES[n]; float EVALUES[n]; for (int j = 0; j < n; j++) { float LLocal = L[j]; float Li = 1 / LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else t = -xLocal; float tmpEnergy = t * t * LLocal; TVALUES[j] = t; EVALUES[j] = tmpEnergy; if (tmpEnergy > max_VAL) { optimalStep = t; max_VAL = tmpEnergy; max_IDX = j; } } print_time_message(&t1, "Initial Shrink End"); print_time_message(&t1, "Start serial Code"); float setialExecutionTime = 0; int setialItetaions = 10; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); for (int i = 0; i < setialItetaions; i++) { // printf("optimal t_idx=%d, tval=%f\n", max_IDX, TVALUES[max_IDX]); // Update x[max_IDX] += TVALUES[max_IDX]; optimalStep = TVALUES[max_IDX]; for (int ki = max_IDX * 4; ki < max_IDX * 4 + 4; ki++) { for (int jj = ATcount[Row_IDX[ki] - 1]; jj < ATcount[Row_IDX[ki] - 1] + rowCounts[Row_IDX[ki] - 1]; jj++) { int j = ColAT[jj]; derivatives[j] += optimalStep * A_TTD[ki] * ATCB[jj]; float LLocal = L[j]; float Li = 1 
/ LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else t = -xLocal; TVALUES[j] = t; EVALUES[j] = t * t * LLocal; } } max_VAL = 0; max_IDX = 0; for (int j = 0; j < n; j++) { if (EVALUES[j] > max_VAL) { max_VAL = EVALUES[j]; max_IDX = j; } } } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&setialExecutionTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); float setialIterationPerSec = setialItetaions / setialExecutionTime * 1000; printf("Serial execution time:%f ms = %f it/sec \n", setialExecutionTime, setialIterationPerSec); print_time_message(&t1, "Serial time for 10 iterations"); //-------------------------------Computation float* T_dev; float* E_dev; hipblasAlloc(n, sizeof(float), (void**) &T_dev); checkCUDAError("Alokacia T_dev"); hipblasAlloc(n, sizeof(float), (void**) &E_dev); checkCUDAError("Alokacia E_dev"); int doworking = 1; int iterations = 1; float time; hipsparseStatus_t copystatus; while (doworking == 1) { cout << "Current lambda is " << lambda << ". Do you want to change it? (y/n): "; string doContinue; cin >> doContinue; if (doContinue == "y") { cout << "Enter lambda: "; cin >> lambda; } cout << "Enter number of iterations: "; cin >> iterations; hipMemcpy(lambda_d, &lambda, 1 * sizeof(float), hipMemcpyHostToDevice); dim3 dimBlock( TOTALTHREDSPERBLOCK); dim3 dimGridRCDM( 1, 1+n/(TOTALTHREDSPERBLOCK)); float tPoint[1]; int maxIndex = 10; hipLaunchKernelGGL(( ShrinkKernel), dim3(dimGridRCDM) ,dim3(dimBlock) , 0, 0, T_dev, E_dev, derivatives_d, L_d,x_d, lambda, n); int maxShrinkSubset = 0; for (int i = 0; i < m; i++) { if (rowCounts[i] > maxShrinkSubset) maxShrinkSubset = rowCounts[i]; } printf("Max shrink subset %d\n", maxShrinkSubset); dim3 dimGridShrinkSubset( 1, 1+maxShrinkSubset/(TOTALTHREDSPERBLOCK)); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); for (int i = 0; i < iterations; i++) { // hipLaunchKernelGGL(( ShrinkKernel), dim3(dimGridRCDM) ,dim3(dimBlock) , 0, 0, T_dev, E_dev, derivatives_d, L_d,x_d, // lambda, n); //maxIndex=10; maxIndex = hipblasIsamax(n, E_dev, 1); maxIndex = maxIndex - 1; // printf("%d;Selected index:%d\n", i, maxIndex); hipLaunchKernelGGL(( IncreaseElement), dim3(1) ,dim3(1) , 0, 0, x_d, maxIndex, T_dev); checkCUDAError("IncreaseElement"); hipblasGetVector(1, sizeof(float), &T_dev[maxIndex], 1, tPoint, 1); checkCUDAError("get T"); // printf("optimal t_idx=%d and value = %f\n", maxIndex, tPoint[0]); for (int ki = maxIndex * 4; ki < maxIndex * 4 + 4; ki++) { // printf("pridat %f %f %d\n", tPoint[0], A_TTD[ki],Row_IDX[ki]-1); copystatus = cusparseSaxpyi(handle, rowCounts[Row_IDX[ki] - 1], tPoint[0] * A_TTD[ki], &A_d[ATcount[Row_IDX[ki] - 1]], &C_idx_d[ATcount[Row_IDX[ki] - 1]], derivatives_d, HIPSPARSE_INDEX_BASE_ZERO); if (copystatus != HIPSPARSE_STATUS_SUCCESS) printf("Nastala chyba pri CuSparse!\n"); // hipDeviceSynchronize(); hipLaunchKernelGGL(( ShrinkKernelSubset), dim3(dimGridShrinkSubset) ,dim3(dimBlock) , 0, 0, T_dev, E_dev, derivatives_d, L_d, x_d, lambda, &ATcount_d[Row_IDX[ki] - 1], &C_idx_d[ATcount[Row_IDX[ki] - 1]]); // checkCUDAError("Shrink subset"); // hipDeviceSynchronize(); // ShrinkKernelSubset<<< dimGridRCDM ,dimBlock >>>(T_dev, E_dev, derivatives_d, // L_d, x_d, lambda, n, &C_idx_d[0]); } // hipDeviceSynchronize(); // hipMemcpy(x, 
derivatives_d, n * sizeof(float), // hipMemcpyDeviceToHost); // checkCUDAError("kernel execution : copy data Derivatives"); // for (int j = 0; j < n; j++) { // printf("der[%d]=%f\n", j, x[j]); // } } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("pridane %f \n", tPoint[0]); printf("trvanie %f ms , %f iter/sec speedUp:%f \n", time, iterations / time * 1000, iterations / time * 1000 / setialIterationPerSec); // double parallelPower = totalthreadsOnBlocks; // parallelPower = parallelPower / time; // parallelPower = parallelPower * 1000 * NMAXITERKernel; // printf( // "Elapset time: %f ms; %1.2f iterations/sec; speedup: %1.4f\n", // time, parallelPower, parallelPower / serialPower); // // L1Norm = hipblasSasum(n, x_d, 1); // residualsSum = hipblasSnrm2(m, g_d, 1); // objectiveValue = 0.5 * residualsSum * residualsSum; // objectiveValue += lambda * L1Norm; // printf("%d:L1 norm = %f, redisuals=%f, objective=%1.16f\n", i, // L1Norm, residualsSum, objectiveValue); cout << "Save particular solution to file? (y/n): "; cin >> doContinue; if (doContinue == "y") { float treshHold; cout << "Enter treshhold for x: "; cin >> treshHold; int writtenBars = 0; FILE *fp; fp = fopen("/tmp/ttd.txt", "w"); hipMemcpy(x, x_d, n * sizeof(float), hipMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); for (int i = 0; i < n; i++) { if (abs(x[i]) > treshHold) { writtenBars++; fprintf(fp, "%d,%d,%d,%d,%f\n", nodeDescription[i * 4], nodeDescription[i * 4 + 1], nodeDescription[i * 4 + 2], nodeDescription[i * 4 + 3], x[i]); } } fclose(fp); printf("Number of written bars:%d\n", writtenBars); } cout << "Continue? (y/n): "; cin >> doContinue; if (doContinue == "n") { doworking = 0; } } print_time_message(&t1, "Computation"); //-------------------------------DeAllocation hipFree(lambda_d); hipFree(A_d); hipFree(C_idx_d); hipFree(n_d); hipFree(x_d); hipFree(derivatives_d); hipFree(L_d); } __global__ void RCDMKernel(float *A, int*R_Idx, int* n, float*residuals, float*x, float * lambda, float* Li, hiprandState_t* cstate) { int j, i, k; float delta, tmp; //partialDetivative int id = (blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x + threadIdx.x); hiprandState_t localState = cstate[id]; float LambdaParameter = lambda[0]; __shared__ float partialDetivative[TOTALTHREDSPERBLOCK]; float xLocal; float LiLocal; float ALocal[4]; int cidx; int RIDX[4]; for (k = 0; k < NMAXITERKernel; k++) { double d = hiprand_uniform_double(&localState); int idx = (int) (d * n[0]); // LOAD A, R, residuals // float* residualsAddress[COLUMNLENGTH]; xLocal = x[idx]; LiLocal = Li[idx]; cidx = idx * 4; partialDetivative[threadIdx.x] = 0; // #pragma unroll COLUMNLENGTH for (i = 0; i < 4; i++) { j = cidx + i; ALocal[i] = A[j]; RIDX[i] = R_Idx[j] - 1; // residualsAddress[i] = &residuals[RIDX[i]]; partialDetivative[threadIdx.x] += ALocal[i] * residuals[RIDX[i]]; } tmp = LiLocal * (partialDetivative[threadIdx.x] + LambdaParameter); if (xLocal > tmp) { delta = -tmp; } else { tmp = LiLocal * (partialDetivative[threadIdx.x] - LambdaParameter); if (xLocal < tmp) { delta = -tmp; } else { delta = -xLocal; } } atomicAdd(&x[idx], delta); // atomicAdd(&x[idx], 1); for (i = 0; i < COLUMNLENGTH; i++) { atomicAdd(&residuals[RIDX[i]], ALocal[i] * delta); // atomicAdd(residualsAddress[i], ALocal[i] * delta); } } cstate[id] = localState; }
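// RCDMKernel above: every thread performs NMAXITERKernel independent randomized coordinate
// updates. A coordinate idx is sampled from the per-thread RNG state (hiprand_uniform_double),
// its four matrix entries and row indices are loaded, the partial derivative is accumulated,
// and the same soft-thresholding step as in the serial code yields delta. x[idx] and the four
// affected residuals are updated with atomicAdd so concurrent updates from other threads are
// not lost; reads of the residuals are not synchronized, i.e. the updates are asynchronous.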
b3d90784c71ebd8887835eb2e745faf34d85fc1a.cu
//ulimit -s unlimited //nvcc -lcusparse test.cu //nvcc -lcublas -lcusparse -arch sm_20 test.cu && ./a.out #include <iostream> #include <stdio.h> #include <math.h> #include <stdlib.h> #include <time.h> #include <cuda.h> #include "device_functions.h" #include <curand.h> #include <curand_kernel.h> #include <cuda_runtime.h> #include "cublas.h" #include "cusparse.h" #include <sstream> #include "ttd.h" #include "kernels.h" #include "helpers.h" #include "SPBLASTK/include/spblas.h" #define TOTALTHREDSPERBLOCK 256 #define COLUMNLENGTH 4 #define NMAXITERKernel 100 #define ROWSCALE 2 using namespace std; __global__ void RCDMKernel(float *A, int*R_Idx, int* n, float*residuals, float*x, float * lambda, float* Li, curandState* cstate); void timeComparison(int row, int col, int exampleType, float executionTime, float lambda) { double objVal = 0; int dim1 = 14; int dim2 = 1; float PRCDMExecutionTime = executionTime; float SRCDMExecutionTime = executionTime; int m, n; clock_t t1; float * A_TTD; int * Row_IDX; int * Col_IDX; int nnzElements; int* nodeDescription; //-------------------------------GET BOUNDARY POINTS int * boundary; int boundarySize = 0; getBoundaryVector(row, col, exampleType, &boundary, &boundarySize); cout << "Boundary size is " << boundarySize << "\n"; //-------------------------------GENERATE PROBLEMS t1 = clock(); generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX, &Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription, ROWSCALE); print_time_message(&t1, "Getting problem dimension"); cout << "Dimension of your problem is " << m << " x " << n << "\n"; printf("Number of NNZ: %d\n", nnzElements); //-------------------------------GET FORCE VECTORS float* g; float residuals[m]; float b[m]; float* redisuals_dev; getForceVector(&g, m, row, col, exampleType); for (int i = 0; i < m; i++) { g[i] = -g[i]; residuals[i] = g[i]; b[i] = -g[i]; } print_time_message(&t1, "Force vector obtained"); //----------------------------------------- FILE *fp; // fp = fopen("/tmp/asdf", "w"); // // for (int i = 0; i < nnzElements; i++) { // fprintf(fp,"%d,%d,%f\n", Row_IDX[i], Col_IDX[i], A_TTD[i]); // } // fclose(fp); // fp = fopen("/tmp/ttd_vectorB.csv", "w"); // // for (int i = 0; i < m; i++) { // fprintf(fp,"%d,%f\n", i,-g[i]); // } // fclose(fp); // print_time_message(&t1, "Data saved"); //----------------------------------------- float x[n]; float L[n]; float Li[n]; for (int i = 0; i < n; i++) { x[i] = 0; L[i] = 0; for (int k = 4 * i; k < 4 * i + 4; k++) { L[i] += A_TTD[k] * A_TTD[k]; } Li[i] = 1 / L[i]; } //-------------------------------Preparing A' in colum orientated int ATcount[m]; int actualCount[m]; int rowCounts[m]; for (int i = 0; i < m; i++) { ATcount[i] = 0; actualCount[i] = 0; } for (int i = 0; i < nnzElements; i++) { ATcount[Row_IDX[i] - 1]++; // Shift from 1-based to 0 based } rowCounts[0] = ATcount[0]; for (int i = 1; i < m; i++) { int tmpCount = ATcount[i]; rowCounts[i] = ATcount[i]; ATcount[i] += ATcount[i - 1]; actualCount[i] = ATcount[i] - tmpCount; } for (int i = 0; i < m; i++) { ATcount[i] = actualCount[i]; } float ATCB[nnzElements]; int ColAT[nnzElements]; for (int i = 0; i < n; i++) { for (int j = 4 * i; j < 4 * i + 4; j++) { int tmprow = Row_IDX[j] - 1; ColAT[actualCount[tmprow]] = i; ATCB[actualCount[tmprow]] = A_TTD[j]; actualCount[tmprow]++; } } fp = fopen("/tmp/ttd_log.txt", "w"); //-------------------------------Inicializacia CuSpare Library /* allocate GPU memory and copy the matrix and vectors into it */ cusparseStatus_t status; cusparseHandle_t handle 
= 0; /* initialize cusparse library */ status = cusparseCreate(&handle); if (status != CUSPARSE_STATUS_SUCCESS) { printf("CUSPARSE Library initialization failed"); } //-------------------------------Alokacia device memroies float* A_d; int* C_idx_d; cudaMalloc((void**) &A_d, nnzElements * sizeof(float)); checkCUDAError("kernel execution Alloc A_d"); cudaMalloc((void**) &C_idx_d, nnzElements * sizeof(int)); checkCUDAError("kernel execution Alloc R_IDX"); int * n_d; cudaMalloc((void**) &n_d, 1 * sizeof(int)); checkCUDAError("kernel execution Alloc ..."); float* derivatives_d; cudaMalloc((void**) &derivatives_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc g_d"); float* L_d; cudaMalloc((void**) &L_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc Li_d"); int* ATcount_d; cudaMalloc((void**) &ATcount_d, m * sizeof(int)); checkCUDAError("kernel execution Alloc AtCount_d"); float* x_d; cudaMalloc((void**) &x_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc x_d"); float* lambda_d; cudaMalloc((void**) &lambda_d, 1 * sizeof(float)); checkCUDAError("kernel execution Alloc lambda_d"); // float* Li_d; // cudaMalloc((void**) &Li_d, n * sizeof(float)); // checkCUDAError("kernel execution Alloc Li_d"); print_time_message(&t1, "Device memory allocated"); //-------------------------------Copy data cudaMemcpy(A_d, &ATCB[0], nnzElements * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy A_d"); cudaMemcpy(C_idx_d, &ColAT[0], nnzElements * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy R"); cudaMemcpy(derivatives_d, x, n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); cudaMemcpy(x_d, x, n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); cudaMemcpy(n_d, &n, 1 * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); cudaMemcpy(L_d, L, n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); for (int i = m - 1; i > 0; i--) { actualCount[i] -= actualCount[i - 1]; } cudaMemcpy(ATcount_d, actualCount, m * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); print_time_message(&t1, "Data coppied"); //-------------------------------Prepare of Derivatives vector on device for (int i = 0; i < m; i++) { if (g[i] != 0) { cusparseStatus_t copystatus = cusparseSaxpyi(handle, rowCounts[i], g[i], &A_d[ATcount[i]], &C_idx_d[ATcount[i]], derivatives_d, CUSPARSE_INDEX_BASE_ZERO); if (copystatus != CUSPARSE_STATUS_SUCCESS) { printf("Nastala chyba!"); } } } print_time_message(&t1, "Derivatives vector on device"); //--------------------------------------------------------------------- //------------------------------Serial Benchmark----------------------- float derivatives[n]; cudaMemcpy(&derivatives[0], derivatives_d, n * sizeof(float), cudaMemcpyDeviceToHost); print_time_message(&t1, "Initial Shrink Start"); int max_IDX = 0; float max_VAL = 0; float optimalStep = 0; float TVALUES[n]; float EVALUES[n]; for (int j = 0; j < n; j++) { float LLocal = L[j]; float Li = 1 / LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; float mlt2 = 1; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else { t = -xLocal; mlt2 = 0; } float tmpEnergy = t * t * LLocal; TVALUES[j] = t; // EVALUES[j] = tmpEnergy / 
2;//+ lambda*x[j]*( signbit(x[j]) -signbit(x[j]+TVALUES[j]) ); EVALUES[j] = mlt2 * (tmpEnergy / 2 + lambda * xLocal * (signbit(xLocal) - signbit(xLocal + t))) + (1 - mlt2) * (lambda * xLocal * signbit(xLocal) - derivatives[j] * t - LLocal * t * t / 2); if (tmpEnergy > max_VAL) { optimalStep = t; max_VAL = tmpEnergy; max_IDX = j; } } print_time_message(&t1, "Initial Shrink End"); print_time_message(&t1, "Start serial Code"); float setialExecutionTime = 0; long long setialItetaions = 0; clock_t t_start; t_start = clock(); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < executionTime) { // printf("optimal t_idx=%d, tval=%f, elapsedTime:%f\n", max_IDX, TVALUES[max_IDX],getTotalElapsetTime(&t_start)); // Update x[max_IDX] += TVALUES[max_IDX]; optimalStep = TVALUES[max_IDX]; for (int ki = max_IDX * 4; ki < max_IDX * 4 + 4; ki++) { for (int jj = ATcount[Row_IDX[ki] - 1]; jj < ATcount[Row_IDX[ki] - 1] + rowCounts[Row_IDX[ki] - 1]; jj++) { int j = ColAT[jj]; derivatives[j] += optimalStep * A_TTD[ki] * ATCB[jj]; float LLocal = L[j]; float Li = 1 / LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; float mlt2 = 1; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else { t = -xLocal; mlt2 = 0; } float tmpEnergy = t * t * LLocal; TVALUES[j] = t; // EVALUES[j] = tmpEnergy / 2;//+ lambda*x[j]*( signbit(x[j]) -signbit(x[j]+TVALUES[j]) ); EVALUES[j] = mlt2 * (tmpEnergy / 2 + lambda * xLocal * (signbit(xLocal) - signbit(xLocal + t))) + (1 - mlt2) * (lambda * xLocal * signbit(xLocal) - derivatives[j] * t - LLocal * t * t / 2); } } max_VAL = 0; max_IDX = 0; for (int j = 0; j < n; j++) { if (EVALUES[j] > max_VAL) { max_VAL = EVALUES[j]; max_IDX = j; } } setialItetaions++; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&setialExecutionTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); float setialIterationPerSec = setialItetaions / setialExecutionTime * 1000; printf("Serial execution time:%f ms = %f it/sec \n", setialExecutionTime, setialIterationPerSec); fprintf(fp, "Serial execution time:%f ms = %f it/sec \n", setialExecutionTime, setialIterationPerSec); printf("serial greedy iterations:%d\n", setialItetaions); fprintf(fp, "serial greedy iterations:%d\n", setialItetaions); saveSolutionIntoFile("/tmp/ttd_g_serial.txt", x, n, nodeDescription, 0); objVal = computeTTDObjectiveValue(A_TTD, Row_IDX, Col_IDX, x, n, m, b, nnzElements, lambda); printf("Obj Val: %1.16f\n", objVal); fprintf(fp, "Obj Val: %1.16f\n", objVal); //-------------------------------Computation float* T_dev; float* E_dev; cublasAlloc(n, sizeof(float), (void**) &T_dev); checkCUDAError("Alokacia T_dev"); cublasAlloc(n, sizeof(float), (void**) &E_dev); checkCUDAError("Alokacia E_dev"); float time; cusparseStatus_t copystatus; cudaMemcpy(lambda_d, &lambda, 1 * sizeof(float), cudaMemcpyHostToDevice); dim3 dimBlock( TOTALTHREDSPERBLOCK); dim3 dimGridRCDM( 1, 1+n/(TOTALTHREDSPERBLOCK)); float tPoint[1]; int maxIndex = 10; ShrinkKernel<<< dimGridRCDM ,dimBlock >>>(T_dev, E_dev, derivatives_d, L_d,x_d, lambda, n); int maxShrinkSubset = 0; for (int i = 0; i < m; i++) { if (rowCounts[i] > maxShrinkSubset) maxShrinkSubset = rowCounts[i]; } printf("Max shrink subset %d\n", maxShrinkSubset); dim3 dimGridShrinkSubset( 1, 
1+maxShrinkSubset/(TOTALTHREDSPERBLOCK)); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); long long parallel_iterations = 0; t_start = clock(); while (getTotalElapsetTime(&t_start) < executionTime) { for (int asdf = 0; asdf < 100; asdf++) { maxIndex = cublasIsamax(n, E_dev, 1); maxIndex = maxIndex - 1; // printf("%d;Selected index:%d\n", i, maxIndex); IncreaseElementAndShrink<<< 1 ,1 >>>( maxIndex,T_dev, E_dev, derivatives_d, L_d,x_d, lambda, n ); checkCUDAError("IncreaseElement"); cublasGetVector(1, sizeof(float), &T_dev[maxIndex], 1, tPoint, 1); checkCUDAError("get T"); // printf("optimal t_idx=%d and value = %f\n", maxIndex, tPoint[0]); for (int ki = maxIndex * 4; ki < maxIndex * 4 + 4; ki++) { // printf("pridat %f %f %d\n", tPoint[0], A_TTD[ki],Row_IDX[ki]-1); copystatus = cusparseSaxpyi(handle, rowCounts[Row_IDX[ki] - 1], tPoint[0] * A_TTD[ki], &A_d[ATcount[Row_IDX[ki] - 1]], &C_idx_d[ATcount[Row_IDX[ki] - 1]], derivatives_d, CUSPARSE_INDEX_BASE_ZERO); if (copystatus != CUSPARSE_STATUS_SUCCESS) printf("Nastala chyba pri CuSparse!\n"); // cudaThreadSynchronize(); ShrinkKernelSubset<<< dimGridShrinkSubset ,dimBlock >>>(T_dev, E_dev, derivatives_d, L_d, x_d, lambda, &ATcount_d[Row_IDX[ki] - 1], &C_idx_d[ATcount[Row_IDX[ki] - 1]]); // checkCUDAError("Shrink subset"); } parallel_iterations++; } } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("pridane %f \n", tPoint[0]); printf("trvanie %f ms , %f iter/sec speedUp:%f \n", time, parallel_iterations / time * 1000, parallel_iterations / time * 1000 / setialIterationPerSec); printf("parallel greedy iterations:%d\n", parallel_iterations); fprintf(fp, "trvanie %f ms , %f iter/sec speedUp:%f \n", time, parallel_iterations / time * 1000, parallel_iterations / time * 1000 / setialIterationPerSec); fprintf(fp, "parallel greedy iterations:%d\n", parallel_iterations); float treshHold = 0; cudaMemcpy(x, x_d, n * sizeof(float), cudaMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); saveSolutionIntoFile("/tmp/ttd_g_parallel.txt", x, n, nodeDescription, treshHold); objVal = computeTTDObjectiveValue(A_TTD, Row_IDX, Col_IDX, x, n, m, b, nnzElements, lambda); printf("Obj Val: %1.16f\n", objVal); fprintf(fp, "Obj Val: %1.16f\n", objVal); print_time_message(&t1, "Computation"); //-------------------------------DeAllocation cudaFree(ATcount_d); print_time_message(&t1, "Device memory DeAllocated"); for (int i = 0; i < n; i++) x[i] = 0; cudaMalloc((void**) &redisuals_dev, m * sizeof(float)); checkCUDAError("kernel execution Alloc residuals_d"); cudaMemcpy(redisuals_dev, &residuals[0], m * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy residuals_d"); cudaMemcpy(x_d, &x[0], n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy x"); setialItetaions = 0; t_start = clock(); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < SRCDMExecutionTime) { float tmp = (float) rand() / RAND_MAX; int idx = (int) (n * tmp); tmp = 0; for (int j = idx * 4; j < idx * 4 + 4; j++) { //printf("A=%f, res=%f,colid=%d\n",A_TTD[j],residuals[Col_IDX[j]],Col_IDX[j]); tmp += A_TTD[j] * residuals[Row_IDX[j] - 1]; } float tmp1 = Li[idx] * (tmp + lambda); if (x[idx] > tmp1) { tmp = -tmp1; } else { tmp1 = Li[idx] * (tmp - lambda); if (x[idx] < tmp1) { tmp = -tmp1; } else { tmp = -x[idx]; } } x[idx] += tmp; // printf("ID:%d, 
value%f\n",idx, x[idx]); for (int j = idx * 4; j < idx * 4 + 4; j++) { residuals[Row_IDX[j] - 1] += tmp * A_TTD[j]; } setialItetaions++; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&setialExecutionTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); float serialItPsec = (float) setialItetaions / setialExecutionTime; printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); printf("serial random iterations:%d\n", setialItetaions); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); fprintf(fp, "serial random iterations:%d\n", setialItetaions); saveSolutionIntoFile("/tmp/ttd_r_serial.txt", x, n, nodeDescription, 0); objVal = computeTTDObjectiveValue(A_TTD, Row_IDX, Col_IDX, x, n, m, b, nnzElements, lambda); printf("Obj Val: %1.16f\n", objVal); fprintf(fp, "Obj Val: %1.16f\n", objVal); //-------------------------------------------------- // ============ Parallel Random dim3 dimBlockRCD2( TOTALTHREDSPERBLOCK); dim3 dimGridRCDM2( dim1, dim2); curandState *devStates; cudaMalloc((void **) &devStates, dim1 * dim2 * TOTALTHREDSPERBLOCK * sizeof(curandState)); setup_kernel<<< dimGridRCDM2, dimBlockRCD2 >>>(devStates); checkCUDAError("Inicializacia ranom states"); cudaFree(L_d); checkCUDAError("Dealocation"); float* Li_dev; cudaMalloc((void**) &Li_dev, n * sizeof(float)); checkCUDAError("kernel execution Alloc Li_d"); cudaMemcpy(Li_dev, &Li[0], n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Coppy Li_d"); cudaMemcpy(A_d, A_TTD, nnzElements * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy A_d"); cudaFree(C_idx_d); checkCUDAError("Dealocation"); int* R_idx_d; cudaMalloc((void**) &R_idx_d, nnzElements * sizeof(int)); checkCUDAError("kernel execution Alloc R_idx_d"); cudaMemcpy(R_idx_d, Row_IDX, nnzElements * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy Ridx_d"); print_time_message(&t1, "Idem spustat paralelny RCDM"); parallel_iterations = 0; float parallelExecutionTime; t_start = clock(); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); float rcdmBufferedTime = 0; while (rcdmBufferedTime < PRCDMExecutionTime) { for (int koko = 0; koko < 4000; koko++) { RCDMKernel<<< dimGridRCDM2, dimBlockRCD2 >>>(A_d, R_idx_d, n_d, redisuals_dev, x_d, lambda_d, Li_dev, devStates); parallel_iterations += dim1 * dim2; } rcdmBufferedTime += getTotalElapsetTime(&t_start); float norm_g = cublasSnrm2(m, redisuals_dev, 1); float norm_x = cublasSasum(n, x_d, 1); double rcdmObjectiveValue = norm_x * lambda + 0.5 * norm_g * norm_g; // printf("Current value = %1.16f\n", rcdmObjectiveValue); cudaMemcpy(x, x_d, n * sizeof(float), cudaMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); double DoubleObjectiveValue = computeTTDObjectiveValue(A_TTD, Row_IDX, Col_IDX, x, n, m, b, nnzElements, lambda); printf("Double objective value: %1.16f\n", DoubleObjectiveValue); printf("Error: %1.16f\n", abs(DoubleObjectiveValue - rcdmObjectiveValue)); updateG(A_TTD, Row_IDX, Col_IDX, x, n, m, b, nnzElements, residuals); cudaMemcpy(redisuals_dev, &residuals[0], m * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy residuals_d"); printf("RESTART\n"); t_start = clock(); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&parallelExecutionTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); 
printf("parallel random iterations:%d toital %d\n", parallel_iterations, parallel_iterations * NMAXITERKernel * TOTALTHREDSPERBLOCK); printf("trvanie %f ms , %f iter/sec , speedup:%f \n", parallelExecutionTime, parallel_iterations / parallelExecutionTime * 1000 * NMAXITERKernel * TOTALTHREDSPERBLOCK, TOTALTHREDSPERBLOCK * NMAXITERKernel * parallel_iterations / parallelExecutionTime / serialItPsec); fprintf(fp, "parallel random iterations:%d toital %d\n", parallel_iterations, parallel_iterations * NMAXITERKernel * TOTALTHREDSPERBLOCK); fprintf(fp, "trvanie %f ms , %f iter/sec , speedup:%f \n", parallelExecutionTime, parallel_iterations / parallelExecutionTime * 1000 * NMAXITERKernel * TOTALTHREDSPERBLOCK, TOTALTHREDSPERBLOCK * NMAXITERKernel * parallel_iterations / parallelExecutionTime / serialItPsec); print_time_message(&t1, "END spustat paralelny RCDM"); cudaMemcpy(x, x_d, n * sizeof(float), cudaMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); saveSolutionIntoFile("/tmp/ttd_r_parallel.txt", x, n, nodeDescription, treshHold); objVal = computeTTDObjectiveValue(A_TTD, Row_IDX, Col_IDX, x, n, m, b, nnzElements, lambda); printf("Obj Val: %1.16f\n", objVal); fprintf(fp, "Obj Val: %1.16f\n", objVal); //------------------------------------------------------ //===================== Nesterov Composite Accelerated float A_nest = 0; float L_nest = L[0]; float mu = 0; float v[n]; float y[n]; float derv[n]; float derv2[n]; float dervHistory[n]; for (int i = 0; i < n; i++) { x[i] = 0; v[i] = 0; derv[i] = 0; dervHistory[i] = 0; } float gamma_u = 2; float gamma_d = gamma_u; float quadSolTmp = 0; parallel_iterations = 0; t_start = clock(); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < executionTime) { int doRepeat = 1; while (doRepeat) { quadSolTmp = 2 * (1 + A_nest * mu) / L_nest; float a = (quadSolTmp + sqrt(quadSolTmp * quadSolTmp + 4 * A_nest * quadSolTmp)) / 2; float convCombA = A_nest / (a + A_nest); float convComba = a / (a + A_nest); for (int i = 0; i < n; i++) { y[i] = convCombA * x[i] + convComba * v[i]; } for (int i = 0; i < n; i++) { derv[i] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for (int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * y[Col_IDX[i] - 1]; } for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } float LLocal = L_nest; float Li = 1 / LLocal; float lambdaOverL = lambda * Li; for (int j = 0; j < n; j++) { float xLocal = x[j]; float alpha = derv[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else t = -xLocal; TVALUES[j] = t; } for (int j = 0; j < n; j++) { derv2[j] = -L_nest * TVALUES[j] - derv[j]; derv[j] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for (int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * (y[Col_IDX[i] - 1] + TVALUES[Col_IDX[i] - 1]); } for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } for (int i = 0; i < n; i++) { derv[i] += derv2[i]; } float LS = 0; float RS = 0; for (int i = 0; i < n; i++) { LS -= derv[i] * TVALUES[i]; RS += derv[i] * derv[i]; } RS = RS / L_nest; if (LS < RS) { L_nest = L_nest * gamma_u; doRepeat = 1; } else { doRepeat = 0; A_nest += a; for (int i = 0; i < n; i++) { x[i] = y[i] + TVALUES[i]; } L_nest = L_nest / gamma_d; float ratioA = (A_nest - a) / 
A_nest; float ratioB = (a) / A_nest; for (int i = 0; i < n; i++) { dervHistory[i] = ratioA * dervHistory[i] + ratioB * (derv[i] - derv2[i]); } float LLocal = 1 / A_nest; float Li = A_nest; float lambdaOverL = lambda * Li; for (int i = 0; i < n; i++) { float alpha = dervHistory[i] * Li; float deltaL = -alpha - lambdaOverL; float deltaR = -alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL; else if (deltaR < 0) t = deltaR; else t = 0; v[i] = t; } } } parallel_iterations++; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&setialExecutionTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("serial AC iterations:%d\n", parallel_iterations); printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, parallel_iterations / setialExecutionTime * 1000); fprintf(fp, "serial AC iterations:%d\n", parallel_iterations); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, parallel_iterations / setialExecutionTime * 1000); saveSolutionIntoFile("/tmp/ttd_ac.txt", x, n, nodeDescription, treshHold); objVal = computeTTDObjectiveValue(A_TTD, Row_IDX, Col_IDX, x, n, m, b, nnzElements, lambda); printf("Obj Val: %1.16f\n", objVal); fprintf(fp, "Obj Val: %1.16f\n", objVal); //------------------------------------------------------- cudaFree(Li_dev); checkCUDAError("Dealocationa"); cudaFree(R_idx_d); checkCUDAError("Dealocationb"); cudaFree(redisuals_dev); checkCUDAError("Dealocationc"); cudaFree(lambda_d); checkCUDAError("Dealocationd"); cudaFree(A_d); checkCUDAError("Dealocatione"); cudaFree(n_d); checkCUDAError("Dealocationg"); cudaFree(x_d); checkCUDAError("Dealocationh"); cudaFree(derivatives_d); checkCUDAError("Dealocationi"); print_time_message(&t1, "Device memory DeAllocated"); fclose(fp); } void oneDayPR(int row, int col, int exampleType, float executionTime, float lambda) { int dim1 = 14; int dim2 = 1; float PRCDMExecutionTime = 3600; float SRCDMExecutionTime = 0; int m, n; clock_t t1; float * A_TTD; int * Row_IDX; int * Col_IDX; int nnzElements; int* nodeDescription; //-------------------------------GET BOUNDARY POINTS int * boundary; int boundarySize = 0; getBoundaryVector(row, col, exampleType, &boundary, &boundarySize); cout << "Boundary size is " << boundarySize << "\n"; //-------------------------------GENERATE PROBLEMS t1 = clock(); generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX, &Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription, ROWSCALE); print_time_message(&t1, "Getting problem dimension"); cout << "Dimension of your problem is " << m << " x " << n << "\n"; printf("Number of NNZ: %d\n", nnzElements); //-------------------------------GET FORCE VECTORS float* g; getForceVector(&g, m, row, col, exampleType); for (int i = 0; i < m; i++) { g[i] = -g[i]; } print_time_message(&t1, "Force vector obtained"); //----------------------------------------- FILE *fp; // fp = fopen("/tmp/asdf", "w"); // // for (int i = 0; i < nnzElements; i++) { // fprintf(fp,"%d,%d,%f\n", Row_IDX[i], Col_IDX[i], A_TTD[i]); // } // fclose(fp); // fp = fopen("/tmp/ttd_vectorB.csv", "w"); // // for (int i = 0; i < m; i++) { // fprintf(fp,"%d,%f\n", i,-g[i]); // } // fclose(fp); // print_time_message(&t1, "Data saved"); //----------------------------------------- float x[n]; float L[n]; float Li[n]; for (int i = 0; i < n; i++) { x[i] = 0; L[i] = 0; for (int k = 4 * i; k < 4 * i + 4; k++) { L[i] += A_TTD[k] * A_TTD[k]; } Li[i] = 1 / L[i]; } //-------------------------------Preparing A' in colum orientated int ATcount[m]; int 
actualCount[m]; int rowCounts[m]; for (int i = 0; i < m; i++) { ATcount[i] = 0; actualCount[i] = 0; } for (int i = 0; i < nnzElements; i++) { ATcount[Row_IDX[i] - 1]++; // Shift from 1-based to 0 based } rowCounts[0] = ATcount[0]; for (int i = 1; i < m; i++) { int tmpCount = ATcount[i]; rowCounts[i] = ATcount[i]; ATcount[i] += ATcount[i - 1]; actualCount[i] = ATcount[i] - tmpCount; } for (int i = 0; i < m; i++) { ATcount[i] = actualCount[i]; } float ATCB[nnzElements]; int ColAT[nnzElements]; for (int i = 0; i < n; i++) { for (int j = 4 * i; j < 4 * i + 4; j++) { int tmprow = Row_IDX[j] - 1; ColAT[actualCount[tmprow]] = i; ATCB[actualCount[tmprow]] = A_TTD[j]; actualCount[tmprow]++; } } fp = fopen("/tmp/ttd_log.txt", "w"); //-------------------------------Inicializacia CuSpare Library /* allocate GPU memory and copy the matrix and vectors into it */ cusparseStatus_t status; cusparseHandle_t handle = 0; /* initialize cusparse library */ status = cusparseCreate(&handle); if (status != CUSPARSE_STATUS_SUCCESS) { printf("CUSPARSE Library initialization failed"); } //-------------------------------Alokacia device memroies float* A_d; int* C_idx_d; cudaMalloc((void**) &A_d, nnzElements * sizeof(float)); checkCUDAError("kernel execution Alloc A_d"); cudaMalloc((void**) &C_idx_d, nnzElements * sizeof(int)); checkCUDAError("kernel execution Alloc R_IDX"); int * n_d; cudaMalloc((void**) &n_d, 1 * sizeof(int)); checkCUDAError("kernel execution Alloc ..."); float* derivatives_d; cudaMalloc((void**) &derivatives_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc g_d"); float* L_d; cudaMalloc((void**) &L_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc Li_d"); int* ATcount_d; cudaMalloc((void**) &ATcount_d, m * sizeof(int)); checkCUDAError("kernel execution Alloc AtCount_d"); float* x_d; cudaMalloc((void**) &x_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc x_d"); float* lambda_d; cudaMalloc((void**) &lambda_d, 1 * sizeof(float)); checkCUDAError("kernel execution Alloc lambda_d"); // float* Li_d; // cudaMalloc((void**) &Li_d, n * sizeof(float)); // checkCUDAError("kernel execution Alloc Li_d"); print_time_message(&t1, "Device memory allocated"); //-------------------------------Copy data cudaMemcpy(A_d, &ATCB[0], nnzElements * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy A_d"); cudaMemcpy(C_idx_d, &ColAT[0], nnzElements * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy R"); cudaMemcpy(derivatives_d, x, n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); cudaMemcpy(x_d, x, n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); cudaMemcpy(n_d, &n, 1 * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); cudaMemcpy(L_d, L, n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); for (int i = m - 1; i > 0; i--) { actualCount[i] -= actualCount[i - 1]; } cudaMemcpy(ATcount_d, actualCount, m * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); print_time_message(&t1, "Data coppied"); //-------------------------------Prepare of Derivatives vector on device for (int i = 0; i < m; i++) { if (g[i] != 0) { cusparseStatus_t copystatus = cusparseSaxpyi(handle, rowCounts[i], g[i], &A_d[ATcount[i]], &C_idx_d[ATcount[i]], derivatives_d, CUSPARSE_INDEX_BASE_ZERO); if (copystatus != CUSPARSE_STATUS_SUCCESS) { printf("Nastala chyba!"); } } } 
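/*
 * With x = 0 the gradient of the smooth part 0.5*||Ax - b||^2 is
 * A'(Ax - b) = -A'b.  The loop above exploits g = -b: for every row i with a
 * nonzero load, cusparseSaxpyi adds g[i] times the sparse row i of A (values
 * in A_d, bar indices in C_idx_d) into derivatives_d, so after the loop
 * derivatives_d holds the initial partial derivative for every one of the
 * n bars.
 */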
print_time_message(&t1, "Derivatives vector on device"); //--------------------------------------------------------------------- //------------------------------Serial Benchmark----------------------- float derivatives[n]; cudaMemcpy(&derivatives[0], derivatives_d, n * sizeof(float), cudaMemcpyDeviceToHost); print_time_message(&t1, "Initial Shrink Start"); int max_IDX = 0; float max_VAL = 0; float optimalStep = 0; float TVALUES[n]; float EVALUES[n]; for (int j = 0; j < n; j++) { float LLocal = L[j]; float Li = 1 / LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; float mlt2 = 1; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else { t = -xLocal; mlt2 = 0; } float tmpEnergy = t * t * LLocal; TVALUES[j] = t; // EVALUES[j] = tmpEnergy / 2;//+ lambda*x[j]*( signbit(x[j]) -signbit(x[j]+TVALUES[j]) ); EVALUES[j] = mlt2 * (tmpEnergy / 2 + lambda * xLocal * (signbit(xLocal) - signbit(xLocal + t))) + (1 - mlt2) * (lambda * xLocal * signbit(xLocal) - derivatives[j] * t - LLocal * t * t / 2); if (tmpEnergy > max_VAL) { optimalStep = t; max_VAL = tmpEnergy; max_IDX = j; } } print_time_message(&t1, "Initial Shrink End"); print_time_message(&t1, "Start serial Code"); float setialExecutionTime = 0; long long setialItetaions = 0; clock_t t_start; t_start = clock(); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < executionTime) { // printf("optimal t_idx=%d, tval=%f, elapsedTime:%f\n", max_IDX, TVALUES[max_IDX],getTotalElapsetTime(&t_start)); // Update x[max_IDX] += TVALUES[max_IDX]; optimalStep = TVALUES[max_IDX]; for (int ki = max_IDX * 4; ki < max_IDX * 4 + 4; ki++) { for (int jj = ATcount[Row_IDX[ki] - 1]; jj < ATcount[Row_IDX[ki] - 1] + rowCounts[Row_IDX[ki] - 1]; jj++) { int j = ColAT[jj]; derivatives[j] += optimalStep * A_TTD[ki] * ATCB[jj]; float LLocal = L[j]; float Li = 1 / LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; float mlt2 = 1; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else { t = -xLocal; mlt2 = 0; } float tmpEnergy = t * t * LLocal; TVALUES[j] = t; // EVALUES[j] = tmpEnergy / 2;//+ lambda*x[j]*( signbit(x[j]) -signbit(x[j]+TVALUES[j]) ); EVALUES[j] = mlt2 * (tmpEnergy / 2 + lambda * xLocal * (signbit(xLocal) - signbit(xLocal + t))) + (1 - mlt2) * (lambda * xLocal * signbit(xLocal) - derivatives[j] * t - LLocal * t * t / 2); } } max_VAL = 0; max_IDX = 0; for (int j = 0; j < n; j++) { if (EVALUES[j] > max_VAL) { max_VAL = EVALUES[j]; max_IDX = j; } } setialItetaions++; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&setialExecutionTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); float setialIterationPerSec = setialItetaions / setialExecutionTime * 1000; printf("Serial execution time:%f ms = %f it/sec \n", setialExecutionTime, setialIterationPerSec); fprintf(fp, "Serial execution time:%f ms = %f it/sec \n", setialExecutionTime, setialIterationPerSec); printf("serial greedy iterations:%d\n", setialItetaions); fprintf(fp, "serial greedy iterations:%d\n", setialItetaions); saveSolutionIntoFile("/tmp/ttd_g_serial.txt", x, n, nodeDescription, 0); //-------------------------------Computation float* 
T_dev; float* E_dev; cublasAlloc(n, sizeof(float), (void**) &T_dev); checkCUDAError("Alokacia T_dev"); cublasAlloc(n, sizeof(float), (void**) &E_dev); checkCUDAError("Alokacia E_dev"); float time; cusparseStatus_t copystatus; cudaMemcpy(lambda_d, &lambda, 1 * sizeof(float), cudaMemcpyHostToDevice); dim3 dimBlock( TOTALTHREDSPERBLOCK); dim3 dimGridRCDM( 1, 1+n/(TOTALTHREDSPERBLOCK)); float tPoint[1]; int maxIndex = 10; ShrinkKernel<<< dimGridRCDM ,dimBlock >>>(T_dev, E_dev, derivatives_d, L_d,x_d, lambda, n); int maxShrinkSubset = 0; for (int i = 0; i < m; i++) { if (rowCounts[i] > maxShrinkSubset) maxShrinkSubset = rowCounts[i]; } printf("Max shrink subset %d\n", maxShrinkSubset); dim3 dimGridShrinkSubset( 1, 1+maxShrinkSubset/(TOTALTHREDSPERBLOCK)); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); long long parallel_iterations = 0; t_start = clock(); while (getTotalElapsetTime(&t_start) < executionTime) { for (int asdf = 0; asdf < 100; asdf++) { maxIndex = cublasIsamax(n, E_dev, 1); maxIndex = maxIndex - 1; // printf("%d;Selected index:%d\n", i, maxIndex); IncreaseElementAndShrink<<< 1 ,1 >>>( maxIndex,T_dev, E_dev, derivatives_d, L_d,x_d, lambda, n ); checkCUDAError("IncreaseElement"); cublasGetVector(1, sizeof(float), &T_dev[maxIndex], 1, tPoint, 1); checkCUDAError("get T"); // printf("optimal t_idx=%d and value = %f\n", maxIndex, tPoint[0]); for (int ki = maxIndex * 4; ki < maxIndex * 4 + 4; ki++) { // printf("pridat %f %f %d\n", tPoint[0], A_TTD[ki],Row_IDX[ki]-1); copystatus = cusparseSaxpyi(handle, rowCounts[Row_IDX[ki] - 1], tPoint[0] * A_TTD[ki], &A_d[ATcount[Row_IDX[ki] - 1]], &C_idx_d[ATcount[Row_IDX[ki] - 1]], derivatives_d, CUSPARSE_INDEX_BASE_ZERO); if (copystatus != CUSPARSE_STATUS_SUCCESS) printf("Nastala chyba pri CuSparse!\n"); // cudaThreadSynchronize(); ShrinkKernelSubset<<< dimGridShrinkSubset ,dimBlock >>>(T_dev, E_dev, derivatives_d, L_d, x_d, lambda, &ATcount_d[Row_IDX[ki] - 1], &C_idx_d[ATcount[Row_IDX[ki] - 1]]); // checkCUDAError("Shrink subset"); } parallel_iterations++; } } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("pridane %f \n", tPoint[0]); printf("trvanie %f ms , %f iter/sec speedUp:%f \n", time, parallel_iterations / time * 1000, parallel_iterations / time * 1000 / setialIterationPerSec); printf("parallel greedy iterations:%d\n", parallel_iterations); fprintf(fp, "trvanie %f ms , %f iter/sec speedUp:%f \n", time, parallel_iterations / time * 1000, parallel_iterations / time * 1000 / setialIterationPerSec); fprintf(fp, "parallel greedy iterations:%d\n", parallel_iterations); float treshHold = 0; cudaMemcpy(x, x_d, n * sizeof(float), cudaMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); saveSolutionIntoFile("/tmp/ttd_g_parallel.txt", x, n, nodeDescription, treshHold); print_time_message(&t1, "Computation"); //-------------------------------DeAllocation cudaFree(ATcount_d); print_time_message(&t1, "Device memory DeAllocated"); float residuals[m]; float b[m]; float* redisuals_dev; for (int i = 0; i < m; i++) { residuals[i] = g[i]; b[i] = -g[i]; } for (int i = 0; i < n; i++) x[i] = 0; cudaMalloc((void**) &redisuals_dev, m * sizeof(float)); checkCUDAError("kernel execution Alloc residuals_d"); cudaMemcpy(redisuals_dev, &residuals[0], m * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy residuals_d"); cudaMemcpy(x_d, &x[0], n * sizeof(float), cudaMemcpyHostToDevice); 
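/*
 * Serial RCDM (random coordinate descent) reference run.  Each iteration
 * picks a uniformly random bar idx, forms the partial derivative
 * d = A_idx' * r from the residuals r = Ax - b, applies the proximal
 * soft-thresholding step
 *   x[idx] <- shrink(x[idx] - d / L[idx], lambda / L[idx]),
 * and then updates the four residual entries touched by that bar.  In this
 * routine SRCDMExecutionTime is 0, so the loop below effectively does not
 * run and only the timing scaffolding around it executes.
 */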
checkCUDAError("kernel execution Copy x"); setialItetaions = 0; t_start = clock(); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < SRCDMExecutionTime) { float tmp = (float) rand() / RAND_MAX; int idx = (int) (n * tmp); tmp = 0; for (int j = idx * 4; j < idx * 4 + 4; j++) { //printf("A=%f, res=%f,colid=%d\n",A_TTD[j],residuals[Col_IDX[j]],Col_IDX[j]); tmp += A_TTD[j] * residuals[Row_IDX[j] - 1]; } float tmp1 = Li[idx] * (tmp + lambda); if (x[idx] > tmp1) { tmp = -tmp1; } else { tmp1 = Li[idx] * (tmp - lambda); if (x[idx] < tmp1) { tmp = -tmp1; } else { tmp = -x[idx]; } } x[idx] += tmp; // printf("ID:%d, value%f\n",idx, x[idx]); for (int j = idx * 4; j < idx * 4 + 4; j++) { residuals[Row_IDX[j] - 1] += tmp * A_TTD[j]; } setialItetaions++; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&setialExecutionTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); float serialItPsec = (float) setialItetaions / setialExecutionTime; printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); printf("serial random iterations:%d\n", setialItetaions); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); fprintf(fp, "serial random iterations:%d\n", setialItetaions); saveSolutionIntoFile("/tmp/ttd_r_serial.txt", x, n, nodeDescription, 0); //-------------------------------------------------- // ============ Parallel Random dim3 dimBlockRCD2( TOTALTHREDSPERBLOCK); dim3 dimGridRCDM2( dim1, dim2); curandState *devStates; cudaMalloc((void **) &devStates, dim1 * dim2 * TOTALTHREDSPERBLOCK * sizeof(curandState)); setup_kernel<<< dimGridRCDM2, dimBlockRCD2 >>>(devStates); checkCUDAError("Inicializacia ranom states"); cudaFree(L_d); checkCUDAError("Dealocation"); float* Li_dev; cudaMalloc((void**) &Li_dev, n * sizeof(float)); checkCUDAError("kernel execution Alloc Li_d"); cudaMemcpy(Li_dev, &Li[0], n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Coppy Li_d"); cudaMemcpy(A_d, A_TTD, nnzElements * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy A_d"); cudaFree(C_idx_d); checkCUDAError("Dealocation"); int* R_idx_d; cudaMalloc((void**) &R_idx_d, nnzElements * sizeof(int)); checkCUDAError("kernel execution Alloc R_idx_d"); cudaMemcpy(R_idx_d, Row_IDX, nnzElements * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy Ridx_d"); float l1sum = cublasSasum(n, x_d, 1); float gNorm = cublasSnrm2(m, redisuals_dev, 1); printf("k=%d,L1 sum part:%1.16f, \|Ax-b\|^2=%1.16f, fval=%1.16f", 0, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); fprintf(fp, "k=%d,L1 sum part:%1.16f, \|Ax-b\|^2=%1.16f, fval=%1.16f", 0, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); print_time_message(&t1, "Idem spustat paralelny RCDM"); parallel_iterations = 0; float parallelExecutionTime; for (int k = 0; k < 24; k++) { t_start = clock(); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < PRCDMExecutionTime) { RCDMKernel<<< dimGridRCDM2, dimBlockRCD2 >>>(A_d, R_idx_d, n_d, redisuals_dev, x_d, lambda_d, Li_dev, devStates); parallel_iterations += dim1 * dim2; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&parallelExecutionTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("k= %d\n", k); printf("parallel random 
iterations:%d totalMultiplier %d\n", parallel_iterations, NMAXITERKernel * TOTALTHREDSPERBLOCK); printf("trvanie %f ms , %f iter/sec , speedup:%f \n", parallelExecutionTime, parallel_iterations / parallelExecutionTime * 1000 * NMAXITERKernel * TOTALTHREDSPERBLOCK, TOTALTHREDSPERBLOCK * NMAXITERKernel * parallel_iterations / parallelExecutionTime / serialItPsec); fprintf(fp, "k= %d\n", k); fprintf(fp, "parallel random iterations:%d toitalMultiplies %d\n", parallel_iterations, NMAXITERKernel * TOTALTHREDSPERBLOCK); fprintf(fp, "trvanie %f ms , %f iter/sec , speedup:%f \n", parallelExecutionTime, parallel_iterations / parallelExecutionTime * 1000 * NMAXITERKernel * TOTALTHREDSPERBLOCK, TOTALTHREDSPERBLOCK * NMAXITERKernel * parallel_iterations / parallelExecutionTime / serialItPsec); print_time_message(&t1, "END spustat paralelny RCDM"); cudaMemcpy(x, x_d, n * sizeof(float), cudaMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); string fileName = ""; fileName = "/tmp/ttd_r_parallel_"; std::ostringstream oss; oss << k; fileName += oss.str(); l1sum = cublasSasum(n, x_d, 1); gNorm = cublasSnrm2(m, redisuals_dev, 1); printf("k=%d,L1 sum part:%f, \|Ax-b\|^2=%f, fval=%f", k, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); fprintf(fp, "k=%d,L1 sum part:%f, \|Ax-b\|^2=%f, fval=%f", k, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); saveSolutionIntoFile(fileName.c_str(), x, n, nodeDescription, treshHold); } //------------------------------------------------------ //===================== Nesterov Composite Accelerated float A_nest = 0; float L_nest = L[0]; float mu = 0; float v[n]; float y[n]; float derv[n]; float derv2[n]; float dervHistory[n]; for (int i = 0; i < n; i++) { x[i] = 0; v[i] = 0; derv[i] = 0; dervHistory[i] = 0; } float gamma_u = 2; float gamma_d = gamma_u; float quadSolTmp = 0; parallel_iterations = 0; t_start = clock(); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < executionTime) { int doRepeat = 1; while (doRepeat) { quadSolTmp = 2 * (1 + A_nest * mu) / L_nest; float a = (quadSolTmp + sqrt(quadSolTmp * quadSolTmp + 4 * A_nest * quadSolTmp)) / 2; float convCombA = A_nest / (a + A_nest); float convComba = a / (a + A_nest); for (int i = 0; i < n; i++) { y[i] = convCombA * x[i] + convComba * v[i]; } for (int i = 0; i < n; i++) { derv[i] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for (int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * y[Col_IDX[i] - 1]; } for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } float LLocal = L_nest; float Li = 1 / LLocal; float lambdaOverL = lambda * Li; for (int j = 0; j < n; j++) { float xLocal = x[j]; float alpha = derv[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else t = -xLocal; TVALUES[j] = t; } for (int j = 0; j < n; j++) { derv2[j] = -L_nest * TVALUES[j] - derv[j]; derv[j] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for (int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * (y[Col_IDX[i] - 1] + TVALUES[Col_IDX[i] - 1]); } for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } for (int i = 0; i < n; i++) { derv[i] += derv2[i]; } float LS = 0; float RS = 0; for (int i = 0; i < n; i++) { LS -= derv[i] * TVALUES[i]; RS += derv[i] * derv[i]; } RS 
= RS / L_nest; if (LS < RS) { L_nest = L_nest * gamma_u; doRepeat = 1; } else { doRepeat = 0; A_nest += a; for (int i = 0; i < n; i++) { x[i] = y[i] + TVALUES[i]; } L_nest = L_nest / gamma_d; float ratioA = (A_nest - a) / A_nest; float ratioB = (a) / A_nest; for (int i = 0; i < n; i++) { dervHistory[i] = ratioA * dervHistory[i] + ratioB * (derv[i] - derv2[i]); } float LLocal = 1 / A_nest; float Li = A_nest; float lambdaOverL = lambda * Li; for (int i = 0; i < n; i++) { float alpha = dervHistory[i] * Li; float deltaL = -alpha - lambdaOverL; float deltaR = -alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL; else if (deltaR < 0) t = deltaR; else t = 0; v[i] = t; } } } parallel_iterations++; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&setialExecutionTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("serial AC iterations:%d\n", parallel_iterations); printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, parallel_iterations / setialExecutionTime * 1000); fprintf(fp, "serial AC iterations:%d\n", parallel_iterations); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, parallel_iterations / setialExecutionTime * 1000); saveSolutionIntoFile("/tmp/ttd_ac.txt", x, n, nodeDescription, treshHold); //------------------------------------------------------- cudaFree(Li_dev); checkCUDAError("Dealocationa"); cudaFree(R_idx_d); checkCUDAError("Dealocationb"); cudaFree(redisuals_dev); checkCUDAError("Dealocationc"); cudaFree(lambda_d); checkCUDAError("Dealocationd"); cudaFree(A_d); checkCUDAError("Dealocatione"); cudaFree(n_d); checkCUDAError("Dealocationg"); cudaFree(x_d); checkCUDAError("Dealocationh"); cudaFree(derivatives_d); checkCUDAError("Dealocationi"); print_time_message(&t1, "Device memory DeAllocated"); fclose(fp); } void oneDayPG(int row, int col, int exampleType, float executionTime, float lambda) { int dim1 = 14; int dim2 = 1; float PRCDMExecutionTime = 1; float PGExecutionTime = 600; float SRCDMExecutionTime = 1; int m, n; clock_t t1; float * A_TTD; int * Row_IDX; int * Col_IDX; int nnzElements; int* nodeDescription; //-------------------------------GET BOUNDARY POINTS int * boundary; int boundarySize = 0; getBoundaryVector(row, col, exampleType, &boundary, &boundarySize); cout << "Boundary size is " << boundarySize << "\n"; //-------------------------------GENERATE PROBLEMS t1 = clock(); generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX, &Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription, ROWSCALE); print_time_message(&t1, "Getting problem dimension"); cout << "Dimension of your problem is " << m << " x " << n << "\n"; printf("Number of NNZ: %d\n", nnzElements); //-------------------------------GET FORCE VECTORS float* g; getForceVector(&g, m, row, col, exampleType); for (int i = 0; i < m; i++) { g[i] = -g[i]; } print_time_message(&t1, "Force vector obtained"); //----------------------------------------- FILE *fp; // fp = fopen("/tmp/asdf", "w"); // // for (int i = 0; i < nnzElements; i++) { // fprintf(fp,"%d,%d,%f\n", Row_IDX[i], Col_IDX[i], A_TTD[i]); // } // fclose(fp); // fp = fopen("/tmp/ttd_vectorB.csv", "w"); // // for (int i = 0; i < m; i++) { // fprintf(fp,"%d,%f\n", i,-g[i]); // } // fclose(fp); // print_time_message(&t1, "Data saved"); //----------------------------------------- float x[n]; float L[n]; float Li[n]; for (int i = 0; i < n; i++) { x[i] = 0; L[i] = 0; for (int k = 4 * i; k < 4 * i + 4; k++) { L[i] += A_TTD[k] * A_TTD[k]; } Li[i] = 1 / L[i]; } 
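/*
 * L[i] is the coordinate Lipschitz constant of the smooth part along bar i:
 * each column of A carries exactly four nonzeros (presumably the two force
 * components at each endpoint of the bar), stored contiguously at positions
 * 4*i .. 4*i+3, so L[i] = ||A_:,i||^2 and Li[i] = 1/L[i] is the step size
 * used by the shrink/soft-threshold updates.
 */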
//-------------------------------Preparing A' in colum orientated int ATcount[m]; int actualCount[m]; int rowCounts[m]; for (int i = 0; i < m; i++) { ATcount[i] = 0; actualCount[i] = 0; } for (int i = 0; i < nnzElements; i++) { ATcount[Row_IDX[i] - 1]++; // Shift from 1-based to 0 based } rowCounts[0] = ATcount[0]; for (int i = 1; i < m; i++) { int tmpCount = ATcount[i]; rowCounts[i] = ATcount[i]; ATcount[i] += ATcount[i - 1]; actualCount[i] = ATcount[i] - tmpCount; } for (int i = 0; i < m; i++) { ATcount[i] = actualCount[i]; } float ATCB[nnzElements]; int ColAT[nnzElements]; for (int i = 0; i < n; i++) { for (int j = 4 * i; j < 4 * i + 4; j++) { int tmprow = Row_IDX[j] - 1; ColAT[actualCount[tmprow]] = i; ATCB[actualCount[tmprow]] = A_TTD[j]; actualCount[tmprow]++; } } fp = fopen("/tmp/ttd_log.txt", "w"); //-------------------------------Inicializacia CuSpare Library /* allocate GPU memory and copy the matrix and vectors into it */ cusparseStatus_t status; cusparseHandle_t handle = 0; /* initialize cusparse library */ status = cusparseCreate(&handle); if (status != CUSPARSE_STATUS_SUCCESS) { printf("CUSPARSE Library initialization failed"); } //-------------------------------Alokacia device memroies float* A_d; int* C_idx_d; cudaMalloc((void**) &A_d, nnzElements * sizeof(float)); checkCUDAError("kernel execution Alloc A_d"); cudaMalloc((void**) &C_idx_d, nnzElements * sizeof(int)); checkCUDAError("kernel execution Alloc R_IDX"); int * n_d; cudaMalloc((void**) &n_d, 1 * sizeof(int)); checkCUDAError("kernel execution Alloc ..."); float* derivatives_d; cudaMalloc((void**) &derivatives_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc g_d"); float* L_d; cudaMalloc((void**) &L_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc Li_d"); int* ATcount_d; cudaMalloc((void**) &ATcount_d, m * sizeof(int)); checkCUDAError("kernel execution Alloc AtCount_d"); float* x_d; cudaMalloc((void**) &x_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc x_d"); float* lambda_d; cudaMalloc((void**) &lambda_d, 1 * sizeof(float)); checkCUDAError("kernel execution Alloc lambda_d"); // float* Li_d; // cudaMalloc((void**) &Li_d, n * sizeof(float)); // checkCUDAError("kernel execution Alloc Li_d"); print_time_message(&t1, "Device memory allocated"); //-------------------------------Copy data cudaMemcpy(A_d, &ATCB[0], nnzElements * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy A_d"); cudaMemcpy(C_idx_d, &ColAT[0], nnzElements * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy R"); cudaMemcpy(derivatives_d, x, n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); cudaMemcpy(x_d, x, n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); cudaMemcpy(n_d, &n, 1 * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); cudaMemcpy(L_d, L, n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); for (int i = m - 1; i > 0; i--) { actualCount[i] -= actualCount[i - 1]; } cudaMemcpy(ATcount_d, actualCount, m * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); print_time_message(&t1, "Data coppied"); //-------------------------------Prepare of Derivatives vector on device for (int i = 0; i < m; i++) { if (g[i] != 0) { cusparseStatus_t copystatus = cusparseSaxpyi(handle, rowCounts[i], g[i], &A_d[ATcount[i]], &C_idx_d[ATcount[i]], derivatives_d, CUSPARSE_INDEX_BASE_ZERO); if 
(copystatus != CUSPARSE_STATUS_SUCCESS) { printf("Nastala chyba!"); } } } print_time_message(&t1, "Derivatives vector on device"); //--------------------------------------------------------------------- //------------------------------Serial Benchmark----------------------- float derivatives[n]; cudaMemcpy(&derivatives[0], derivatives_d, n * sizeof(float), cudaMemcpyDeviceToHost); print_time_message(&t1, "Initial Shrink Start"); int max_IDX = 0; float max_VAL = 0; float optimalStep = 0; float TVALUES[n]; float EVALUES[n]; for (int j = 0; j < n; j++) { float LLocal = L[j]; float Li = 1 / LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; float mlt2 = 1; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else { t = -xLocal; mlt2 = 0; } float tmpEnergy = t * t * LLocal; TVALUES[j] = t; // EVALUES[j] = tmpEnergy / 2;//+ lambda*x[j]*( signbit(x[j]) -signbit(x[j]+TVALUES[j]) ); EVALUES[j] = mlt2 * (tmpEnergy / 2 + lambda * xLocal * (signbit(xLocal) - signbit(xLocal + t))) + (1 - mlt2) * (lambda * xLocal * signbit(xLocal) - derivatives[j] * t - LLocal * t * t / 2); if (tmpEnergy > max_VAL) { optimalStep = t; max_VAL = tmpEnergy; max_IDX = j; } } print_time_message(&t1, "Initial Shrink End"); print_time_message(&t1, "Start serial Code"); float setialExecutionTime = 0; long long setialItetaions = 0; clock_t t_start; t_start = clock(); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < executionTime) { // printf("optimal t_idx=%d, tval=%f, elapsedTime:%f\n", max_IDX, TVALUES[max_IDX],getTotalElapsetTime(&t_start)); // Update x[max_IDX] += TVALUES[max_IDX]; optimalStep = TVALUES[max_IDX]; for (int ki = max_IDX * 4; ki < max_IDX * 4 + 4; ki++) { for (int jj = ATcount[Row_IDX[ki] - 1]; jj < ATcount[Row_IDX[ki] - 1] + rowCounts[Row_IDX[ki] - 1]; jj++) { int j = ColAT[jj]; derivatives[j] += optimalStep * A_TTD[ki] * ATCB[jj]; float LLocal = L[j]; float Li = 1 / LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; float mlt2 = 1; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else { t = -xLocal; mlt2 = 0; } float tmpEnergy = t * t * LLocal; TVALUES[j] = t; // EVALUES[j] = tmpEnergy / 2;//+ lambda*x[j]*( signbit(x[j]) -signbit(x[j]+TVALUES[j]) ); EVALUES[j] = mlt2 * (tmpEnergy / 2 + lambda * xLocal * (signbit(xLocal) - signbit(xLocal + t))) + (1 - mlt2) * (lambda * xLocal * signbit(xLocal) - derivatives[j] * t - LLocal * t * t / 2); } } max_VAL = 0; max_IDX = 0; for (int j = 0; j < n; j++) { if (EVALUES[j] > max_VAL) { max_VAL = EVALUES[j]; max_IDX = j; } } setialItetaions++; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&setialExecutionTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); float setialIterationPerSec = setialItetaions / setialExecutionTime * 1000; printf("Serial execution time:%f ms = %f it/sec \n", setialExecutionTime, setialIterationPerSec); fprintf(fp, "Serial execution time:%f ms = %f it/sec \n", setialExecutionTime, setialIterationPerSec); printf("serial greedy iterations:%d\n", setialItetaions); fprintf(fp, "serial greedy iterations:%d\n", setialItetaions); saveSolutionIntoFile("/tmp/ttd_g_serial.txt", x, n, 
nodeDescription, 0); //-------------------------------Computation float* T_dev; float* E_dev; cublasAlloc(n, sizeof(float), (void**) &T_dev); checkCUDAError("Alokacia T_dev"); cublasAlloc(n, sizeof(float), (void**) &E_dev); checkCUDAError("Alokacia E_dev"); float time; cusparseStatus_t copystatus; cudaMemcpy(lambda_d, &lambda, 1 * sizeof(float), cudaMemcpyHostToDevice); dim3 dimBlock( TOTALTHREDSPERBLOCK); dim3 dimGridRCDM( 1, 1+n/(TOTALTHREDSPERBLOCK)); float tPoint[1]; int maxIndex = 10; ShrinkKernel<<< dimGridRCDM ,dimBlock >>>(T_dev, E_dev, derivatives_d, L_d,x_d, lambda, n); int maxShrinkSubset = 0; for (int i = 0; i < m; i++) { if (rowCounts[i] > maxShrinkSubset) maxShrinkSubset = rowCounts[i]; } printf("Max shrink subset %d\n", maxShrinkSubset); dim3 dimGridShrinkSubset( 1, 1+maxShrinkSubset/(TOTALTHREDSPERBLOCK)); long long parallel_iterations = 0; float treshHold = 0; for (int k = 0; k < 114; k++) { cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); t_start = clock(); while (getTotalElapsetTime(&t_start) < PGExecutionTime) { for (int asdf = 0; asdf < 100; asdf++) { maxIndex = cublasIsamax(n, E_dev, 1); maxIndex = maxIndex - 1; // printf("%d;Selected index:%d\n", i, maxIndex); IncreaseElementAndShrink<<< 1 ,1 >>>( maxIndex,T_dev, E_dev, derivatives_d, L_d,x_d, lambda, n ); // IncreaseElement<<< 1 ,1 >>>(x_d, maxIndex, T_dev); checkCUDAError("IncreaseElement"); cublasGetVector(1, sizeof(float), &T_dev[maxIndex], 1, tPoint, 1); checkCUDAError("get T"); // printf("optimal t_idx=%d and value = %f\n", maxIndex, tPoint[0]); for (int ki = maxIndex * 4; ki < maxIndex * 4 + 4; ki++) { // printf("pridat %f %f %d\n", tPoint[0], A_TTD[ki],Row_IDX[ki]-1); copystatus = cusparseSaxpyi(handle, rowCounts[Row_IDX[ki] - 1], tPoint[0] * A_TTD[ki], &A_d[ATcount[Row_IDX[ki] - 1]], &C_idx_d[ATcount[Row_IDX[ki] - 1]], derivatives_d, CUSPARSE_INDEX_BASE_ZERO); if (copystatus != CUSPARSE_STATUS_SUCCESS) printf("Nastala chyba pri CuSparse!\n"); // cudaThreadSynchronize(); ShrinkKernelSubset<<< dimGridShrinkSubset ,dimBlock >>>(T_dev, E_dev, derivatives_d, L_d, x_d, lambda, &ATcount_d[Row_IDX[ki] - 1], &C_idx_d[ATcount[Row_IDX[ki] - 1]]); // checkCUDAError("Shrink subset"); } // ShrinkKernel<<< dimGridRCDM ,dimBlock >>>(T_dev, E_dev, derivatives_d, L_d,x_d, // lambda, n); parallel_iterations++; } } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("pridane %f \n", tPoint[0]); printf("trvanie %f ms , %f iter/sec speedUp:%f \n", time, parallel_iterations / time * 1000, parallel_iterations / time * 1000 / setialIterationPerSec); printf("parallel greedy iterations:%d\n", parallel_iterations); fprintf(fp, "trvanie %f ms , %f iter/sec speedUp:%f \n", time, parallel_iterations / time * 1000, parallel_iterations / time * 1000 / setialIterationPerSec); fprintf(fp, "parallel greedy iterations:%d\n", parallel_iterations); cudaMemcpy(x, x_d, n * sizeof(float), cudaMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); string fileName = ""; fileName = "/tmp/ttd_g_parallel_"; std::ostringstream oss; oss << k; fileName += oss.str(); // printf("k=%d,L1 sum part:%f, \|Ax-b\|^2=%f, fval=%f", k, l1sum, gNorm // * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); // fprintf(fp, "k=%d,L1 sum part:%f, \|Ax-b\|^2=%f, fval=%f", k, l1sum, // gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); saveSolutionIntoFile(fileName.c_str(), x, n, nodeDescription, treshHold); } print_time_message(&t1, 
"Computation"); //-------------------------------DeAllocation cudaFree(ATcount_d); print_time_message(&t1, "Device memory DeAllocated"); float residuals[m]; float b[m]; float* redisuals_dev; for (int i = 0; i < m; i++) { residuals[i] = g[i]; b[i] = -g[i]; } for (int i = 0; i < n; i++) x[i] = 0; cudaMalloc((void**) &redisuals_dev, m * sizeof(float)); checkCUDAError("kernel execution Alloc residuals_d"); cudaMemcpy(redisuals_dev, &residuals[0], m * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy residuals_d"); cudaMemcpy(x_d, &x[0], n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy x"); setialItetaions = 0; t_start = clock(); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < SRCDMExecutionTime) { float tmp = (float) rand() / RAND_MAX; int idx = (int) (n * tmp); tmp = 0; for (int j = idx * 4; j < idx * 4 + 4; j++) { //printf("A=%f, res=%f,colid=%d\n",A_TTD[j],residuals[Col_IDX[j]],Col_IDX[j]); tmp += A_TTD[j] * residuals[Row_IDX[j] - 1]; } float tmp1 = Li[idx] * (tmp + lambda); if (x[idx] > tmp1) { tmp = -tmp1; } else { tmp1 = Li[idx] * (tmp - lambda); if (x[idx] < tmp1) { tmp = -tmp1; } else { tmp = -x[idx]; } } x[idx] += tmp; // printf("ID:%d, value%f\n",idx, x[idx]); for (int j = idx * 4; j < idx * 4 + 4; j++) { residuals[Row_IDX[j] - 1] += tmp * A_TTD[j]; } setialItetaions++; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&setialExecutionTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); float serialItPsec = (float) setialItetaions / setialExecutionTime; printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); printf("serial random iterations:%d\n", setialItetaions); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); fprintf(fp, "serial random iterations:%d\n", setialItetaions); saveSolutionIntoFile("/tmp/ttd_r_serial.txt", x, n, nodeDescription, 0); //-------------------------------------------------- // ============ Parallel Random dim3 dimBlockRCD2( TOTALTHREDSPERBLOCK); dim3 dimGridRCDM2( dim1, dim2); curandState *devStates; cudaMalloc((void **) &devStates, dim1 * dim2 * TOTALTHREDSPERBLOCK * sizeof(curandState)); setup_kernel<<< dimGridRCDM2, dimBlockRCD2 >>>(devStates); checkCUDAError("Inicializacia ranom states"); cudaFree(L_d); checkCUDAError("Dealocation"); float* Li_dev; cudaMalloc((void**) &Li_dev, n * sizeof(float)); checkCUDAError("kernel execution Alloc Li_d"); cudaMemcpy(Li_dev, &Li[0], n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Coppy Li_d"); cudaMemcpy(A_d, A_TTD, nnzElements * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy A_d"); cudaFree(C_idx_d); checkCUDAError("Dealocation"); int* R_idx_d; cudaMalloc((void**) &R_idx_d, nnzElements * sizeof(int)); checkCUDAError("kernel execution Alloc R_idx_d"); cudaMemcpy(R_idx_d, Row_IDX, nnzElements * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy Ridx_d"); float l1sum = cublasSasum(n, x_d, 1); float gNorm = cublasSnrm2(m, redisuals_dev, 1); printf("k=%d,L1 sum part:%1.16f, \|Ax-b\|^2=%1.16f, fval=%1.16f", 0, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); fprintf(fp, "k=%d,L1 sum part:%1.16f, \|Ax-b\|^2=%1.16f, fval=%1.16f", 0, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); print_time_message(&t1, "Idem spustat paralelny RCDM"); 
parallel_iterations = 0; float parallelExecutionTime; for (int k = 0; k < 0; k++) { t_start = clock(); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < PRCDMExecutionTime) { RCDMKernel<<< dimGridRCDM2, dimBlockRCD2 >>>(A_d, R_idx_d, n_d, redisuals_dev, x_d, lambda_d, Li_dev, devStates); parallel_iterations += dim1 * dim2; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&parallelExecutionTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("k= %d\n", k); printf("parallel random iterations:%d totalMultiplier %d\n", parallel_iterations, NMAXITERKernel * TOTALTHREDSPERBLOCK); printf("trvanie %f ms , %f iter/sec , speedup:%f \n", parallelExecutionTime, parallel_iterations / parallelExecutionTime * 1000 * NMAXITERKernel * TOTALTHREDSPERBLOCK, TOTALTHREDSPERBLOCK * NMAXITERKernel * parallel_iterations / parallelExecutionTime / serialItPsec); fprintf(fp, "k= %d\n", k); fprintf(fp, "parallel random iterations:%d toitalMultiplies %d\n", parallel_iterations, NMAXITERKernel * TOTALTHREDSPERBLOCK); fprintf(fp, "trvanie %f ms , %f iter/sec , speedup:%f \n", parallelExecutionTime, parallel_iterations / parallelExecutionTime * 1000 * NMAXITERKernel * TOTALTHREDSPERBLOCK, TOTALTHREDSPERBLOCK * NMAXITERKernel * parallel_iterations / parallelExecutionTime / serialItPsec); print_time_message(&t1, "END spustat paralelny RCDM"); cudaMemcpy(x, x_d, n * sizeof(float), cudaMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); string fileName = ""; fileName = "/tmp/ttd_r_parallel_"; std::ostringstream oss; oss << k; fileName += oss.str(); l1sum = cublasSasum(n, x_d, 1); gNorm = cublasSnrm2(m, redisuals_dev, 1); printf("k=%d,L1 sum part:%f, \|Ax-b\|^2=%f, fval=%f", k, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); fprintf(fp, "k=%d,L1 sum part:%f, \|Ax-b\|^2=%f, fval=%f", k, l1sum, gNorm * gNorm, lambda * l1sum + 0.5 * gNorm * gNorm); saveSolutionIntoFile(fileName.c_str(), x, n, nodeDescription, treshHold); } //------------------------------------------------------ //===================== Nesterov Composite Accelerated float A_nest = 0; float L_nest = L[0]; float mu = 0; float v[n]; float y[n]; float derv[n]; float derv2[n]; float dervHistory[n]; for (int i = 0; i < n; i++) { x[i] = 0; v[i] = 0; derv[i] = 0; dervHistory[i] = 0; } float gamma_u = 2; float gamma_d = gamma_u; float quadSolTmp = 0; parallel_iterations = 0; t_start = clock(); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); while (getTotalElapsetTime(&t_start) < executionTime) { int doRepeat = 1; while (doRepeat) { quadSolTmp = 2 * (1 + A_nest * mu) / L_nest; float a = (quadSolTmp + sqrt(quadSolTmp * quadSolTmp + 4 * A_nest * quadSolTmp)) / 2; float convCombA = A_nest / (a + A_nest); float convComba = a / (a + A_nest); for (int i = 0; i < n; i++) { y[i] = convCombA * x[i] + convComba * v[i]; } for (int i = 0; i < n; i++) { derv[i] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for (int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * y[Col_IDX[i] - 1]; } for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } float LLocal = L_nest; float Li = 1 / LLocal; float lambdaOverL = lambda * Li; for (int j = 0; j < n; j++) { float xLocal = x[j]; float alpha = derv[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL - xLocal; else if 
(deltaR < 0) t = deltaR - xLocal; else t = -xLocal; TVALUES[j] = t; } for (int j = 0; j < n; j++) { derv2[j] = -L_nest * TVALUES[j] - derv[j]; derv[j] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for (int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * (y[Col_IDX[i] - 1] + TVALUES[Col_IDX[i] - 1]); } for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } for (int i = 0; i < n; i++) { derv[i] += derv2[i]; } float LS = 0; float RS = 0; for (int i = 0; i < n; i++) { LS -= derv[i] * TVALUES[i]; RS += derv[i] * derv[i]; } RS = RS / L_nest; if (LS < RS) { L_nest = L_nest * gamma_u; doRepeat = 1; } else { doRepeat = 0; A_nest += a; for (int i = 0; i < n; i++) { x[i] = y[i] + TVALUES[i]; } L_nest = L_nest / gamma_d; float ratioA = (A_nest - a) / A_nest; float ratioB = (a) / A_nest; for (int i = 0; i < n; i++) { dervHistory[i] = ratioA * dervHistory[i] + ratioB * (derv[i] - derv2[i]); } float LLocal = 1 / A_nest; float Li = A_nest; float lambdaOverL = lambda * Li; for (int i = 0; i < n; i++) { float alpha = dervHistory[i] * Li; float deltaL = -alpha - lambdaOverL; float deltaR = -alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL; else if (deltaR < 0) t = deltaR; else t = 0; v[i] = t; } } } parallel_iterations++; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&setialExecutionTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("serial AC iterations:%d\n", parallel_iterations); printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, parallel_iterations / setialExecutionTime * 1000); fprintf(fp, "serial AC iterations:%d\n", parallel_iterations); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, parallel_iterations / setialExecutionTime * 1000); saveSolutionIntoFile("/tmp/ttd_ac.txt", x, n, nodeDescription, treshHold); //------------------------------------------------------- cudaFree(Li_dev); checkCUDAError("Dealocationa"); cudaFree(R_idx_d); checkCUDAError("Dealocationb"); cudaFree(redisuals_dev); checkCUDAError("Dealocationc"); cudaFree(lambda_d); checkCUDAError("Dealocationd"); cudaFree(A_d); checkCUDAError("Dealocatione"); cudaFree(n_d); checkCUDAError("Dealocationg"); cudaFree(x_d); checkCUDAError("Dealocationh"); cudaFree(derivatives_d); checkCUDAError("Dealocationi"); print_time_message(&t1, "Device memory DeAllocated"); fclose(fp); } void niceAC(int row, int col, int exampleType, float executionTime, float lambda) { int dim1 = 14; int dim2 = 1; float PRCDMExecutionTime = 0; float SRCDMExecutionTime = PRCDMExecutionTime; float setialExecutionTime = 0; int m, n; clock_t t1; float * A_TTD; int * Row_IDX; int * Col_IDX; int nnzElements; int* nodeDescription; clock_t t_start; //-------------------------------GET BOUNDARY POINTS int * boundary; int boundarySize = 0; getBoundaryVector(row, col, exampleType, &boundary, &boundarySize); cout << "Boundary size is " << boundarySize << "\n"; //-------------------------------GENERATE PROBLEMS t1 = clock(); generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX, &Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription, ROWSCALE); print_time_message(&t1, "Getting problem dimension"); cout << "Dimension of your problem is " << m << " x " << n << "\n"; printf("Number of NNZ: %d\n", nnzElements); //-------------------------------GET FORCE VECTORS float* g; float b[m]; getForceVector(&g, m, row, col, exampleType); for (int i = 0; i < m; i++) { b[i] = g[i]; g[i] = -g[i]; } 
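/*
 * b keeps the nodal force vector as returned by getForceVector, while g is
 * overwritten with -b.  At x = 0 the residual r = Ax - b equals g, so g can
 * seed the residual vector of the RCDM warm-up directly, and the Nesterov
 * loop below re-initializes its working gradient with g[i] = -b[i] on every
 * pass.
 */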
print_time_message(&t1, "Force vector obtained"); //----------------------------------------- FILE *fp; // fp = fopen("/tmp/asdf", "w"); // // for (int i = 0; i < nnzElements; i++) { // fprintf(fp,"%d,%d,%f\n", Row_IDX[i], Col_IDX[i], A_TTD[i]); // } // fclose(fp); // fp = fopen("/tmp/ttd_vectorB.csv", "w"); // // for (int i = 0; i < m; i++) { // fprintf(fp,"%d,%f\n", i,-g[i]); // } // fclose(fp); // print_time_message(&t1, "Data saved"); //----------------------------------------- float x[n]; float L[n]; float Li[n]; for (int i = 0; i < n; i++) { x[i] = 0; L[i] = 0; for (int k = 4 * i; k < 4 * i + 4; k++) { L[i] += A_TTD[k] * A_TTD[k]; } Li[i] = 1 / L[i]; } //-------------------------------Preparing A' in colum orientated int ATcount[m]; int actualCount[m]; int rowCounts[m]; for (int i = 0; i < m; i++) { ATcount[i] = 0; actualCount[i] = 0; } for (int i = 0; i < nnzElements; i++) { ATcount[Row_IDX[i] - 1]++; // Shift from 1-based to 0 based } rowCounts[0] = ATcount[0]; for (int i = 1; i < m; i++) { int tmpCount = ATcount[i]; rowCounts[i] = ATcount[i]; ATcount[i] += ATcount[i - 1]; actualCount[i] = ATcount[i] - tmpCount; } for (int i = 0; i < m; i++) { ATcount[i] = actualCount[i]; } float ATCB[nnzElements]; int ColAT[nnzElements]; for (int i = 0; i < n; i++) { for (int j = 4 * i; j < 4 * i + 4; j++) { int tmprow = Row_IDX[j] - 1; ColAT[actualCount[tmprow]] = i; ATCB[actualCount[tmprow]] = A_TTD[j]; actualCount[tmprow]++; } } fp = fopen("/tmp/ttd_log.txt", "w"); //-------------------------------Inicializacia CuSpare Library /* allocate GPU memory and copy the matrix and vectors into it */ float residuals[m]; float* redisuals_dev; for (int i = 0; i < m; i++) { residuals[i] = g[i]; } for (int i = 0; i < n; i++) x[i] = 0; long long setialItetaions = 0; t_start = clock(); while (getTotalElapsetTime(&t_start) < SRCDMExecutionTime) { float tmp = (float) rand() / RAND_MAX; int idx = (int) (n * tmp); tmp = 0; for (int j = idx * 4; j < idx * 4 + 4; j++) { //printf("A=%f, res=%f,colid=%d\n",A_TTD[j],residuals[Col_IDX[j]],Col_IDX[j]); tmp += A_TTD[j] * residuals[Row_IDX[j] - 1]; } float tmp1 = Li[idx] * (tmp + lambda); if (x[idx] > tmp1) { tmp = -tmp1; } else { tmp1 = Li[idx] * (tmp - lambda); if (x[idx] < tmp1) { tmp = -tmp1; } else { tmp = -x[idx]; } } x[idx] += tmp; // printf("ID:%d, value%f\n",idx, x[idx]); for (int j = idx * 4; j < idx * 4 + 4; j++) { residuals[Row_IDX[j] - 1] += tmp * A_TTD[j]; } setialItetaions++; } float serialItPsec = (float) setialItetaions / setialExecutionTime; printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); printf("serial random iterations:%d\n", setialItetaions); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); fprintf(fp, "serial random iterations:%d\n", setialItetaions); saveSolutionIntoFile("/tmp/ttd_r_serial.txt", x, n, nodeDescription, 0); //===================== Nesterov Composite Accelerated float A_nest = 0; float L_nest = L[0]; float mu = 0; float v[n]; float y[n]; float derv[n]; float derv2[n]; float dervHistory[n]; for (int i = 0; i < n; i++) { x[i] = 0; v[i] = 0; derv[i] = 0; dervHistory[i] = 0; } float gamma_u = 2; float gamma_d = gamma_u; float quadSolTmp = 0; float TVALUES[n]; float treshHold = 0; int parallel_iterations = 0; t_start = clock(); while (getTotalElapsetTime(&t_start) < executionTime) { int doRepeat = 1; while (doRepeat) { quadSolTmp = 2 * (1 + A_nest * mu) / L_nest; float a = (quadSolTmp + sqrt(quadSolTmp * quadSolTmp + 
4 * A_nest * quadSolTmp)) / 2; float convCombA = A_nest / (a + A_nest); float convComba = a / (a + A_nest); for (int i = 0; i < n; i++) { y[i] = convCombA * x[i] + convComba * v[i]; } for (int i = 0; i < n; i++) { derv[i] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for (int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * y[Col_IDX[i] - 1]; } for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } float LLocal = L_nest; float Li = 1 / LLocal; float lambdaOverL = lambda * Li; for (int j = 0; j < n; j++) { float xLocal = x[j]; float alpha = derv[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else t = -xLocal; TVALUES[j] = t; } for (int j = 0; j < n; j++) { derv2[j] = -L_nest * TVALUES[j] - derv[j]; derv[j] = 0; } for (int i = 0; i < m; i++) { g[i] = -b[i]; } for (int i = 0; i < nnzElements; i++) // derv = Ax-b { g[Row_IDX[i] - 1] += A_TTD[i] * (y[Col_IDX[i] - 1] + TVALUES[Col_IDX[i] - 1]); } for (int i = 0; i < nnzElements; i++) { derv[Col_IDX[i] - 1] += g[Row_IDX[i] - 1] * A_TTD[i]; } for (int i = 0; i < n; i++) { derv[i] += derv2[i]; } float LS = 0; float RS = 0; for (int i = 0; i < n; i++) { LS -= derv[i] * TVALUES[i]; RS += derv[i] * derv[i]; } RS = RS / L_nest; if (LS < RS) { L_nest = L_nest * gamma_u; doRepeat = 1; } else { doRepeat = 0; A_nest += a; for (int i = 0; i < n; i++) { x[i] = y[i] + TVALUES[i]; } L_nest = L_nest / gamma_d; float ratioA = (A_nest - a) / A_nest; float ratioB = (a) / A_nest; for (int i = 0; i < n; i++) { dervHistory[i] = ratioA * dervHistory[i] + ratioB * (derv[i] - derv2[i]); } float LLocal = 1 / A_nest; float Li = A_nest; float lambdaOverL = lambda * Li; for (int i = 0; i < n; i++) { float alpha = dervHistory[i] * Li; float deltaL = -alpha - lambdaOverL; float deltaR = -alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL; else if (deltaR < 0) t = deltaR; else t = 0; v[i] = t; } } } parallel_iterations++; } printf("serial AC iterations:%d\n", parallel_iterations); setialExecutionTime = getElapsetTime(&t_start); fprintf(fp, "serial AC iterations:%d\n", parallel_iterations); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, parallel_iterations / setialExecutionTime * 1000); saveSolutionIntoFile("/tmp/ttd_ac_big.txt", x, n, nodeDescription, treshHold); //------------------------------------------------------- print_time_message(&t1, "Device memory DeAllocated"); fclose(fp); } void niceRCDM(int row, int col, int exampleType, float executionTime, float lambda) { int dim1 = 14; int dim2 = 1; float PRCDMExecutionTime = executionTime; float SRCDMExecutionTime = PRCDMExecutionTime; float setialExecutionTime = 0; int m, n; clock_t t1; float * A_TTD; int * Row_IDX; int * Col_IDX; int nnzElements; int* nodeDescription; clock_t t_start; //-------------------------------GET BOUNDARY POINTS int * boundary; int boundarySize = 0; getBoundaryVector(row, col, exampleType, &boundary, &boundarySize); cout << "Boundary size is " << boundarySize << "\n"; //-------------------------------GENERATE PROBLEMS t1 = clock(); generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX, &Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription, ROWSCALE); print_time_message(&t1, "Getting problem dimension"); cout << "Dimension of your problem is " << m << " x " << n << "\n"; printf("Number of NNZ: %d\n", nnzElements); 
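/*
 * niceRCDM: a long, purely serial RCDM run on the same composite problem
 *   min_x  lambda * ||x||_1 + 0.5 * ||Ax - b||^2,
 * writing its solution to /tmp/ttd_r_serial_big.txt.  No kernels are launched
 * here; everything runs on the host.
 * TODO: setialExecutionTime is never measured in this routine, so the
 * duration / iterations-per-second figures printed at its end divide by zero.
 */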
//-------------------------------GET FORCE VECTORS float* g; float b[m]; getForceVector(&g, m, row, col, exampleType); for (int i = 0; i < m; i++) { b[i] = g[i]; g[i] = -g[i]; } print_time_message(&t1, "Force vector obtained"); //----------------------------------------- FILE *fp; // fp = fopen("/tmp/asdf", "w"); // // for (int i = 0; i < nnzElements; i++) { // fprintf(fp,"%d,%d,%f\n", Row_IDX[i], Col_IDX[i], A_TTD[i]); // } // fclose(fp); // fp = fopen("/tmp/ttd_vectorB.csv", "w"); // // for (int i = 0; i < m; i++) { // fprintf(fp,"%d,%f\n", i,-g[i]); // } // fclose(fp); // print_time_message(&t1, "Data saved"); //----------------------------------------- float x[n]; float L[n]; float Li[n]; for (int i = 0; i < n; i++) { x[i] = 0; L[i] = 0; for (int k = 4 * i; k < 4 * i + 4; k++) { L[i] += A_TTD[k] * A_TTD[k]; } Li[i] = 1 / L[i]; } //-------------------------------Preparing A' in colum orientated int ATcount[m]; int actualCount[m]; int rowCounts[m]; for (int i = 0; i < m; i++) { ATcount[i] = 0; actualCount[i] = 0; } for (int i = 0; i < nnzElements; i++) { ATcount[Row_IDX[i] - 1]++; // Shift from 1-based to 0 based } rowCounts[0] = ATcount[0]; for (int i = 1; i < m; i++) { int tmpCount = ATcount[i]; rowCounts[i] = ATcount[i]; ATcount[i] += ATcount[i - 1]; actualCount[i] = ATcount[i] - tmpCount; } for (int i = 0; i < m; i++) { ATcount[i] = actualCount[i]; } float ATCB[nnzElements]; int ColAT[nnzElements]; for (int i = 0; i < n; i++) { for (int j = 4 * i; j < 4 * i + 4; j++) { int tmprow = Row_IDX[j] - 1; ColAT[actualCount[tmprow]] = i; ATCB[actualCount[tmprow]] = A_TTD[j]; actualCount[tmprow]++; } } fp = fopen("/tmp/ttd_log.txt", "w"); //-------------------------------Inicializacia CuSpare Library /* allocate GPU memory and copy the matrix and vectors into it */ float residuals[m]; float* redisuals_dev; for (int i = 0; i < m; i++) { residuals[i] = g[i]; } for (int i = 0; i < n; i++) x[i] = 0; long long setialItetaions = 0; t_start = clock(); while (getTotalElapsetTime(&t_start) < SRCDMExecutionTime) { float tmp = (float) rand() / RAND_MAX; int idx = (int) (n * tmp); tmp = 0; for (int j = idx * 4; j < idx * 4 + 4; j++) { //printf("A=%f, res=%f,colid=%d\n",A_TTD[j],residuals[Col_IDX[j]],Col_IDX[j]); tmp += A_TTD[j] * residuals[Row_IDX[j] - 1]; } float tmp1 = Li[idx] * (tmp + lambda); if (x[idx] > tmp1) { tmp = -tmp1; } else { tmp1 = Li[idx] * (tmp - lambda); if (x[idx] < tmp1) { tmp = -tmp1; } else { tmp = -x[idx]; } } x[idx] += tmp; // printf("ID:%d, value%f\n",idx, x[idx]); for (int j = idx * 4; j < idx * 4 + 4; j++) { residuals[Row_IDX[j] - 1] += tmp * A_TTD[j]; } setialItetaions++; } float serialItPsec = (float) setialItetaions / setialExecutionTime; printf("trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); printf("serial random iterations:%d\n", setialItetaions); fprintf(fp, "trvanie %f ms , %f iter/sec \n", setialExecutionTime, setialItetaions / setialExecutionTime * 1000); fprintf(fp, "serial random iterations:%d\n", setialItetaions); saveSolutionIntoFile("/tmp/ttd_r_serial_big.txt", x, n, nodeDescription, 0); fclose(fp); } int main(void) { // timeComparison(50, 50, 7, 10000, 0.0001;); // timeComparison(100, 100, 2, 10000, 0.00001); timeComparison(100, 100, 8, 10000, 0.001); // oneDayPG(60, 60, 2, 1, 0.0001); // niceRCDM(200, 200, 2, 80000, 0.0001); // niceAC(200, 200, 2, 80000, 0.0001); // niceProblem(200, 200, 2, 100000, 0.0001); // greedyTTD(); // calculateTTDProblem(); return 1; } void greedyTTD() { int col, row, exampleType; cout 
<< "Enter number of columns: "; col = 80; cin >> col; cout << "Enter number of rows: "; row = 80; cin >> row; // cout << "Enter example type: "; // cin >> exampleType; exampleType = 7; int m, n; clock_t t1;//, t2; float * A_TTD; int * Row_IDX; int * Col_IDX; int nnzElements; int* nodeDescription; //-------------------------------GET BOUNDARY POINTS int * boundary; int boundarySize = 0; getBoundaryVector(row, col, exampleType, &boundary, &boundarySize); cout << "Boundary size is " << boundarySize << "\n"; //-------------------------------GENERATE PROBLEMS t1 = clock(); generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX, &Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription, ROWSCALE); print_time_message(&t1, "Getting problem dimension"); cout << "Dimension of your problem is " << m << " x " << n << "\n"; printf("Number of NNZ: %d\n", nnzElements); //-------------------------------GET FORCE VECTORS float* g; getForceVector(&g, m, row, col, exampleType); for (int i = 0; i < m; i++) { g[i] = -g[i]; } print_time_message(&t1, "Force vector obtained"); float x[n]; float L[n]; for (int i = 0; i < n; i++) { x[i] = 0; L[i] = 0; for (int k = 4 * i; k < 4 * i + 4; k++) { L[i] += A_TTD[k] * A_TTD[k]; } } //-------------------------------Inicializacia CuSpare Library /* allocate GPU memory and copy the matrix and vectors into it */ cusparseStatus_t status; cusparseHandle_t handle = 0; /* initialize cusparse library */ status = cusparseCreate(&handle); if (status != CUSPARSE_STATUS_SUCCESS) { printf("CUSPARSE Library initialization failed"); } //-------------------------------Preparing A' in colum orientated int ATcount[m]; int actualCount[m]; int rowCounts[m]; for (int i = 0; i < m; i++) { ATcount[i] = 0; actualCount[i] = 0; } for (int i = 0; i < nnzElements; i++) { ATcount[Row_IDX[i] - 1]++; // Shift from 1-based to 0 based } rowCounts[0] = ATcount[0]; for (int i = 1; i < m; i++) { int tmpCount = ATcount[i]; rowCounts[i] = ATcount[i]; ATcount[i] += ATcount[i - 1]; actualCount[i] = ATcount[i] - tmpCount; } for (int i = 0; i < m; i++) { ATcount[i] = actualCount[i]; } float ATCB[nnzElements]; int ColAT[nnzElements]; for (int i = 0; i < n; i++) { for (int j = 4 * i; j < 4 * i + 4; j++) { int tmprow = Row_IDX[j] - 1; ColAT[actualCount[tmprow]] = i; ATCB[actualCount[tmprow]] = A_TTD[j]; actualCount[tmprow]++; } } //-------------------------------Alokacia device memroies float* A_d; int* C_idx_d; cudaMalloc((void**) &A_d, nnzElements * sizeof(float)); checkCUDAError("kernel execution Alloc A_d"); cudaMalloc((void**) &C_idx_d, nnzElements * sizeof(int)); checkCUDAError("kernel execution Alloc R_IDX"); int * n_d; cudaMalloc((void**) &n_d, 1 * sizeof(int)); checkCUDAError("kernel execution Alloc ..."); float* derivatives_d; cudaMalloc((void**) &derivatives_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc g_d"); float* L_d; cudaMalloc((void**) &L_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc Li_d"); int* ATcount_d; cudaMalloc((void**) &ATcount_d, m * sizeof(int)); checkCUDAError("kernel execution Alloc AtCount_d"); float* x_d; cudaMalloc((void**) &x_d, n * sizeof(float)); checkCUDAError("kernel execution Alloc x_d"); float* lambda_d; cudaMalloc((void**) &lambda_d, 1 * sizeof(float)); checkCUDAError("kernel execution Alloc lambda_d"); // float* Li_d; // cudaMalloc((void**) &Li_d, n * sizeof(float)); // checkCUDAError("kernel execution Alloc Li_d"); print_time_message(&t1, "Device memory allocated"); //-------------------------------Copy data cudaMemcpy(A_d, 
&ATCB[0], nnzElements * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy A_d"); cudaMemcpy(C_idx_d, &ColAT[0], nnzElements * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("kernel execution Copy R"); cudaMemcpy(derivatives_d, x, n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); cudaMemcpy(x_d, x, n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); cudaMemcpy(n_d, &n, 1 * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); cudaMemcpy(L_d, L, n * sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); for (int i = m - 1; i > 0; i--) { actualCount[i] -= actualCount[i - 1]; } // for (int i=0;i<m;i++) //{ // printf("AT count[%d]=%d\n",i,actualCount[i]); //} cudaMemcpy(ATcount_d, actualCount, m * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("kernel execution compy ...."); print_time_message(&t1, "Data coppied"); //-------------------------------Prepare of Derivatives vector on device for (int i = 0; i < m; i++) { if (g[i] != 0) { cusparseStatus_t copystatus = cusparseSaxpyi(handle, rowCounts[i], g[i], &A_d[ATcount[i]], &C_idx_d[ATcount[i]], derivatives_d, CUSPARSE_INDEX_BASE_ZERO); if (copystatus != CUSPARSE_STATUS_SUCCESS) { printf("Nastala chyba!"); } } } print_time_message(&t1, "Derivatives vector on device"); //--------------------------------------------------------------------- float lambda = 0.0001; //------------------------------Serial Benchmark----------------------- float derivatives[n]; cudaMemcpy(&derivatives[0], derivatives_d, n * sizeof(float), cudaMemcpyDeviceToHost); print_time_message(&t1, "Initial Shrink Start"); int max_IDX = 0; float max_VAL = 0; float optimalStep = 0; float TVALUES[n]; float EVALUES[n]; for (int j = 0; j < n; j++) { float LLocal = L[j]; float Li = 1 / LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else t = -xLocal; float tmpEnergy = t * t * LLocal; TVALUES[j] = t; EVALUES[j] = tmpEnergy; if (tmpEnergy > max_VAL) { optimalStep = t; max_VAL = tmpEnergy; max_IDX = j; } } print_time_message(&t1, "Initial Shrink End"); print_time_message(&t1, "Start serial Code"); float setialExecutionTime = 0; int setialItetaions = 10; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); for (int i = 0; i < setialItetaions; i++) { // printf("optimal t_idx=%d, tval=%f\n", max_IDX, TVALUES[max_IDX]); // Update x[max_IDX] += TVALUES[max_IDX]; optimalStep = TVALUES[max_IDX]; for (int ki = max_IDX * 4; ki < max_IDX * 4 + 4; ki++) { for (int jj = ATcount[Row_IDX[ki] - 1]; jj < ATcount[Row_IDX[ki] - 1] + rowCounts[Row_IDX[ki] - 1]; jj++) { int j = ColAT[jj]; derivatives[j] += optimalStep * A_TTD[ki] * ATCB[jj]; float LLocal = L[j]; float Li = 1 / LLocal; float xLocal = x[j]; float lambdaOverL = lambda * Li; float alpha = derivatives[j] * Li; float deltaL = xLocal - alpha - lambdaOverL; float deltaR = xLocal - alpha + lambdaOverL; float t; if (deltaL > 0) t = deltaL - xLocal; else if (deltaR < 0) t = deltaR - xLocal; else t = -xLocal; TVALUES[j] = t; EVALUES[j] = t * t * LLocal; } } max_VAL = 0; max_IDX = 0; for (int j = 0; j < n; j++) { if (EVALUES[j] > max_VAL) { max_VAL = EVALUES[j]; max_IDX = j; } } } cudaEventRecord(stop, 0); 
cudaEventSynchronize(stop); cudaEventElapsedTime(&setialExecutionTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); float setialIterationPerSec = setialItetaions / setialExecutionTime * 1000; printf("Serial execution time:%f ms = %f it/sec \n", setialExecutionTime, setialIterationPerSec); print_time_message(&t1, "Serial time for 10 iterations"); //-------------------------------Computation float* T_dev; float* E_dev; cublasAlloc(n, sizeof(float), (void**) &T_dev); checkCUDAError("Alokacia T_dev"); cublasAlloc(n, sizeof(float), (void**) &E_dev); checkCUDAError("Alokacia E_dev"); int doworking = 1; int iterations = 1; float time; cusparseStatus_t copystatus; while (doworking == 1) { cout << "Current lambda is " << lambda << ". Do you want to change it? (y/n): "; string doContinue; cin >> doContinue; if (doContinue == "y") { cout << "Enter lambda: "; cin >> lambda; } cout << "Enter number of iterations: "; cin >> iterations; cudaMemcpy(lambda_d, &lambda, 1 * sizeof(float), cudaMemcpyHostToDevice); dim3 dimBlock( TOTALTHREDSPERBLOCK); dim3 dimGridRCDM( 1, 1+n/(TOTALTHREDSPERBLOCK)); float tPoint[1]; int maxIndex = 10; ShrinkKernel<<< dimGridRCDM ,dimBlock >>>(T_dev, E_dev, derivatives_d, L_d,x_d, lambda, n); int maxShrinkSubset = 0; for (int i = 0; i < m; i++) { if (rowCounts[i] > maxShrinkSubset) maxShrinkSubset = rowCounts[i]; } printf("Max shrink subset %d\n", maxShrinkSubset); dim3 dimGridShrinkSubset( 1, 1+maxShrinkSubset/(TOTALTHREDSPERBLOCK)); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); for (int i = 0; i < iterations; i++) { // ShrinkKernel<<< dimGridRCDM ,dimBlock >>>(T_dev, E_dev, derivatives_d, L_d,x_d, // lambda, n); //maxIndex=10; maxIndex = cublasIsamax(n, E_dev, 1); maxIndex = maxIndex - 1; // printf("%d;Selected index:%d\n", i, maxIndex); IncreaseElement<<< 1 ,1 >>>(x_d, maxIndex, T_dev); checkCUDAError("IncreaseElement"); cublasGetVector(1, sizeof(float), &T_dev[maxIndex], 1, tPoint, 1); checkCUDAError("get T"); // printf("optimal t_idx=%d and value = %f\n", maxIndex, tPoint[0]); for (int ki = maxIndex * 4; ki < maxIndex * 4 + 4; ki++) { // printf("pridat %f %f %d\n", tPoint[0], A_TTD[ki],Row_IDX[ki]-1); copystatus = cusparseSaxpyi(handle, rowCounts[Row_IDX[ki] - 1], tPoint[0] * A_TTD[ki], &A_d[ATcount[Row_IDX[ki] - 1]], &C_idx_d[ATcount[Row_IDX[ki] - 1]], derivatives_d, CUSPARSE_INDEX_BASE_ZERO); if (copystatus != CUSPARSE_STATUS_SUCCESS) printf("Nastala chyba pri CuSparse!\n"); // cudaThreadSynchronize(); ShrinkKernelSubset<<< dimGridShrinkSubset ,dimBlock >>>(T_dev, E_dev, derivatives_d, L_d, x_d, lambda, &ATcount_d[Row_IDX[ki] - 1], &C_idx_d[ATcount[Row_IDX[ki] - 1]]); // checkCUDAError("Shrink subset"); // cudaThreadSynchronize(); // ShrinkKernelSubset<<< dimGridRCDM ,dimBlock >>>(T_dev, E_dev, derivatives_d, // L_d, x_d, lambda, n, &C_idx_d[0]); } // cudaThreadSynchronize(); // cudaMemcpy(x, derivatives_d, n * sizeof(float), // cudaMemcpyDeviceToHost); // checkCUDAError("kernel execution : copy data Derivatives"); // for (int j = 0; j < n; j++) { // printf("der[%d]=%f\n", j, x[j]); // } } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("pridane %f \n", tPoint[0]); printf("trvanie %f ms , %f iter/sec speedUp:%f \n", time, iterations / time * 1000, iterations / time * 1000 / setialIterationPerSec); // double parallelPower = totalthreadsOnBlocks; // parallelPower = parallelPower / time; // parallelPower = parallelPower * 
1000 * NMAXITERKernel; // printf( // "Elapset time: %f ms; %1.2f iterations/sec; speedup: %1.4f\n", // time, parallelPower, parallelPower / serialPower); // // L1Norm = cublasSasum(n, x_d, 1); // residualsSum = cublasSnrm2(m, g_d, 1); // objectiveValue = 0.5 * residualsSum * residualsSum; // objectiveValue += lambda * L1Norm; // printf("%d:L1 norm = %f, redisuals=%f, objective=%1.16f\n", i, // L1Norm, residualsSum, objectiveValue); cout << "Save particular solution to file? (y/n): "; cin >> doContinue; if (doContinue == "y") { float treshHold; cout << "Enter treshhold for x: "; cin >> treshHold; int writtenBars = 0; FILE *fp; fp = fopen("/tmp/ttd.txt", "w"); cudaMemcpy(x, x_d, n * sizeof(float), cudaMemcpyDeviceToHost); checkCUDAError("kernel execution compy ...."); for (int i = 0; i < n; i++) { if (abs(x[i]) > treshHold) { writtenBars++; fprintf(fp, "%d,%d,%d,%d,%f\n", nodeDescription[i * 4], nodeDescription[i * 4 + 1], nodeDescription[i * 4 + 2], nodeDescription[i * 4 + 3], x[i]); } } fclose(fp); printf("Number of written bars:%d\n", writtenBars); } cout << "Continue? (y/n): "; cin >> doContinue; if (doContinue == "n") { doworking = 0; } } print_time_message(&t1, "Computation"); //-------------------------------DeAllocation cudaFree(lambda_d); cudaFree(A_d); cudaFree(C_idx_d); cudaFree(n_d); cudaFree(x_d); cudaFree(derivatives_d); cudaFree(L_d); } __global__ void RCDMKernel(float *A, int*R_Idx, int* n, float*residuals, float*x, float * lambda, float* Li, curandState* cstate) { int j, i, k; float delta, tmp; //partialDetivative int id = (blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x + threadIdx.x); curandState localState = cstate[id]; float LambdaParameter = lambda[0]; __shared__ float partialDetivative[TOTALTHREDSPERBLOCK]; float xLocal; float LiLocal; float ALocal[4]; int cidx; int RIDX[4]; for (k = 0; k < NMAXITERKernel; k++) { double d = curand_uniform_double(&localState); int idx = (int) (d * n[0]); // LOAD A, R, residuals // float* residualsAddress[COLUMNLENGTH]; xLocal = x[idx]; LiLocal = Li[idx]; cidx = idx * 4; partialDetivative[threadIdx.x] = 0; // #pragma unroll COLUMNLENGTH for (i = 0; i < 4; i++) { j = cidx + i; ALocal[i] = A[j]; RIDX[i] = R_Idx[j] - 1; // residualsAddress[i] = &residuals[RIDX[i]]; partialDetivative[threadIdx.x] += ALocal[i] * residuals[RIDX[i]]; } tmp = LiLocal * (partialDetivative[threadIdx.x] + LambdaParameter); if (xLocal > tmp) { delta = -tmp; } else { tmp = LiLocal * (partialDetivative[threadIdx.x] - LambdaParameter); if (xLocal < tmp) { delta = -tmp; } else { delta = -xLocal; } } atomicAdd(&x[idx], delta); // atomicAdd(&x[idx], 1); for (i = 0; i < COLUMNLENGTH; i++) { atomicAdd(&residuals[RIDX[i]], ALocal[i] * delta); // atomicAdd(residualsAddress[i], ALocal[i] * delta); } } cstate[id] = localState; }
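// --- Illustrative appendix (not part of the original TTD sources) ---
// Every coordinate update above (the serial RCDM loop, the accelerated Nesterov
// variant, and the greedy ShrinkKernel path) boils down to essentially the same
// soft-thresholding step for the L1-regularized least-squares objective. The
// helper below is a minimal host-side sketch of that step; the name
// soft_threshold_step and the tiny main() are illustrative only.
#include <cstdio>

// Returns the step t so that x + t minimizes the 1-d model
//   0.5*L*t^2 + deriv*t + lambda*|x + t|.
static float soft_threshold_step(float x, float deriv, float L, float lambda) {
    float Li = 1.0f / L;
    float alpha = deriv * Li;
    float deltaL = x - alpha - lambda * Li;  // new point if it stays positive
    float deltaR = x - alpha + lambda * Li;  // new point if it stays negative
    if (deltaL > 0) return deltaL - x;
    if (deltaR < 0) return deltaR - x;
    return -x;                               // otherwise threshold to exactly 0
}

int main() {
    // With x=0.5, deriv=2.0, L=4.0, lambda=0.1 both shifted candidates bracket
    // zero, so the step is -x and the coordinate is set to exactly 0.
    printf("step = %f\n", soft_threshold_step(0.5f, 2.0f, 4.0f, 0.1f)); // -0.500000
    return 0;
}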
6255ac6963992090783a9c4271ce2d2633c4e330.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "general_kernels.h" namespace cg = cooperative_groups; template <typename T> __global__ void column_sum_reduce(const T* __restrict__ inp, T* __restrict__ out, int rows, int width) { __shared__ float tile[TILE_DIM][TILE_DIM + 1]; cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b); int idx = blockDim.x * blockIdx.x + threadIdx.x; int y_stride = width * TILE_DIM; float localSum = 0; // Loop across matrix height if (idx < width) { int offset = threadIdx.y * width + idx; for (int r = threadIdx.y; r < rows; r += TILE_DIM) { localSum += (float)inp[offset]; offset += y_stride; } } tile[threadIdx.x][threadIdx.y] = localSum; __syncthreads(); // Sum the shared buffer. float sum = tile[threadIdx.y][threadIdx.x]; #ifndef __STOCHASTIC_MODE__ __syncthreads(); #endif for (int i = 1; i < TILE_DIM; i <<= 1) sum += g.shfl_down(sum, i); if (threadIdx.x == 0) { int pos = blockIdx.x * TILE_DIM + threadIdx.y; if (pos < width) out[pos] = sum; } } template <typename T> void launch_fuse_transpose_bias_kernel(const T* inp, T* out, int rows, int cols, hipStream_t stream); template <> void launch_fuse_transpose_bias_kernel<float>(const float* inp, float* out, int rows, int cols, hipStream_t stream) { // assert(rows % TILE_DIM == 0); // assert(cols % TILE_DIM == 0); dim3 grid_dim((cols - 1) / TILE_DIM + 1); dim3 block_dim(TILE_DIM, TILE_DIM); hipLaunchKernelGGL(( column_sum_reduce<float>), dim3(grid_dim), dim3(block_dim), 0, stream, inp, out, rows, cols); } template <> void launch_fuse_transpose_bias_kernel<__half>(const __half* inp, __half* out, int rows, int cols, hipStream_t stream) { // assert(rows % TILE_DIM == 0); // assert(cols % TILE_DIM == 0); dim3 grid_dim((cols - 1) / TILE_DIM + 1); dim3 block_dim(TILE_DIM, TILE_DIM); hipLaunchKernelGGL(( column_sum_reduce<__half>), dim3(grid_dim), dim3(block_dim), 0, stream, inp, out, rows, cols); } __global__ void fused_add2_kernel(const int N, float* out, const float* inp1, const float* inp2) { const float4* inp1_4 = reinterpret_cast<const float4*>(inp1); const float4* inp2_4 = reinterpret_cast<const float4*>(inp2); float4* out_4 = reinterpret_cast<float4*>(out); CUDA_1D_KERNEL_LOOP(j, N) { float4 val; float4 inp1_reg = inp1_4[j]; float4 inp2_reg = inp2_4[j]; val.x = inp1_reg.x + inp2_reg.x; val.y = inp1_reg.y + inp2_reg.y; val.z = inp1_reg.z + inp2_reg.z; val.w = inp1_reg.w + inp2_reg.w; out_4[j] = val; } } __global__ void fused_add2_kernel(const int N, __half* out, const __half* inp1, const __half* inp2) { float2 inp1_4; float2 inp2_4; __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); const float2* inp1_arr = reinterpret_cast<const float2*>(inp1); const float2* inp2_arr = reinterpret_cast<const float2*>(inp2); CUDA_1D_KERNEL_LOOP(j, N) { inp1_4 = inp1_arr[j]; inp2_4 = inp2_arr[j]; float2 inp1_h_f_0 = __half22float2(inp1_h[0]); float2 inp1_h_f_1 = __half22float2(inp1_h[1]); float2 inp2_h_f_0 = __half22float2(inp2_h[0]); float2 inp2_h_f_1 = __half22float2(inp2_h[1]); inp1_h_f_0.x += inp2_h_f_0.x; inp1_h_f_0.y += inp2_h_f_0.y; inp1_h_f_1.x += inp2_h_f_1.x; inp1_h_f_1.y += inp2_h_f_1.y; float2 val_f; __half2* val_h = reinterpret_cast<__half2*>(&val_f); val_h[0] = __float22half2_rn(inp1_h_f_0); val_h[1] = __float22half2_rn(inp1_h_f_1); float2* out_4 = reinterpret_cast<float2*>(out); out_4[j] = val_f; } } template <> void launch_fused_add2<float>(float* out, 
const float* inp1, const float* inp2, int batch_size, int seq_length, int hidden_dim, hipStream_t& stream) { int total_count = batch_size * seq_length * hidden_dim / 4; dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); hipLaunchKernelGGL(( fused_add2_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, total_count, out, inp1, inp2); } template <> void launch_fused_add2<__half>(__half* out, const __half* inp1, const __half* inp2, int batch_size, int seq_length, int hidden_dim, hipStream_t& stream) { int total_count = batch_size * seq_length * hidden_dim / 4; dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); hipLaunchKernelGGL(( fused_add2_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, total_count, out, inp1, inp2); } __global__ void fused_add3_kernel(float* out, const float* inp1, const float* inp2, const float* inp3, int size, int row_stride) { int row = blockIdx.x; int id = threadIdx.x; const float4* inp1_4 = reinterpret_cast<const float4*>(inp1); const float4* inp2_4 = reinterpret_cast<const float4*>(inp2); const float4* inp3_4 = reinterpret_cast<const float4*>(inp3); float4* out_4 = reinterpret_cast<float4*>(out); float4 val; float4 inp1_reg = inp1_4[row * row_stride + id]; float4 inp2_reg = inp2_4[row * row_stride + id]; float4 inp3_reg = inp3_4[row * row_stride + id]; val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x; val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y; val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z; val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w; out_4[row * row_stride + id] = val; } __global__ void fused_add3_kernel(__half* out, const __half* inp1, const __half* inp2, const __half* inp3, int size, int row_stride) { int row = blockIdx.x; int id = threadIdx.x; const float2* inp1_arr = reinterpret_cast<const float2*>(inp1); const float2* inp2_arr = reinterpret_cast<const float2*>(inp2); const float2* inp3_arr = reinterpret_cast<const float2*>(inp3); float2 inp1_4 = inp1_arr[row * row_stride + id]; float2 inp2_4 = inp2_arr[row * row_stride + id]; float2 inp3_4 = inp3_arr[row * row_stride + id]; __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); float2 inp1_h_f_0 = __half22float2(inp1_h[0]); float2 inp1_h_f_1 = __half22float2(inp1_h[1]); float2 inp2_h_f_0 = __half22float2(inp2_h[0]); float2 inp2_h_f_1 = __half22float2(inp2_h[1]); float2 inp3_h_f_0 = __half22float2(inp3_h[0]); float2 inp3_h_f_1 = __half22float2(inp3_h[1]); inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x); inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y); inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x); inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y); float2 val_f; __half2* val_h = reinterpret_cast<__half2*>(&val_f); val_h[0] = __float22half2_rn(inp1_h_f_0); val_h[1] = __float22half2_rn(inp1_h_f_1); float2* out_4 = reinterpret_cast<float2*>(out); out_4[row * row_stride + id] = val_f; } template <> void launch_fused_add3<float>(float* out, const float* inp1, const float* inp2, const float* inp3, int batch_size, int seq_length, int hidden_size, hipStream_t& stream) { dim3 grid_dim(batch_size * seq_length); dim3 block_dim(hidden_size / 4); hipLaunchKernelGGL(( fused_add3_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); } template <> void launch_fused_add3<__half>(__half* 
out, const __half* inp1, const __half* inp2, const __half* inp3, int batch_size, int seq_length, int hidden_size, hipStream_t& stream) { dim3 grid_dim(batch_size * seq_length); dim3 block_dim(hidden_size / 4); hipLaunchKernelGGL(( fused_add3_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); } __global__ void fused_add4_kernel(float* out, const float* inp1, const float* inp2, const float* inp3, const float* inp4, int size, int row_stride) { int row = blockIdx.x; int id = threadIdx.x; const float4* inp1_4 = reinterpret_cast<const float4*>(inp1); const float4* inp2_4 = reinterpret_cast<const float4*>(inp2); const float4* inp3_4 = reinterpret_cast<const float4*>(inp3); const float4* inp4_4 = reinterpret_cast<const float4*>(inp4); float4* out_4 = reinterpret_cast<float4*>(out); float4 val; float4 inp1_reg = inp1_4[row * row_stride + id]; float4 inp2_reg = inp2_4[row * row_stride + id]; float4 inp3_reg = inp3_4[row * row_stride + id]; float4 inp4_reg = inp4_4[row * row_stride + id]; val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x + inp4_reg.x; val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y + inp4_reg.y; val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z + inp4_reg.z; val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w + inp4_reg.w; out_4[row * row_stride + id] = val; } __global__ void fused_add4_kernel(__half* out, const __half* inp1, const __half* inp2, const __half* inp3, const __half* inp4, int size, int row_stride) { int row = blockIdx.x; int id = threadIdx.x; const float2* inp1_arr = reinterpret_cast<const float2*>(inp1); const float2* inp2_arr = reinterpret_cast<const float2*>(inp2); const float2* inp3_arr = reinterpret_cast<const float2*>(inp3); const float2* inp4_arr = reinterpret_cast<const float2*>(inp4); float2 inp1_4 = inp1_arr[row * row_stride + id]; float2 inp2_4 = inp2_arr[row * row_stride + id]; float2 inp3_4 = inp3_arr[row * row_stride + id]; float2 inp4_4 = inp4_arr[row * row_stride + id]; __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); __half2* inp4_h = reinterpret_cast<__half2*>(&inp4_4); float2 inp1_h_f_0 = __half22float2(inp1_h[0]); float2 inp1_h_f_1 = __half22float2(inp1_h[1]); float2 inp2_h_f_0 = __half22float2(inp2_h[0]); float2 inp2_h_f_1 = __half22float2(inp2_h[1]); float2 inp3_h_f_0 = __half22float2(inp3_h[0]); float2 inp3_h_f_1 = __half22float2(inp3_h[1]); float2 inp4_h_f_0 = __half22float2(inp4_h[0]); float2 inp4_h_f_1 = __half22float2(inp4_h[1]); inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x + inp4_h_f_0.x); inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y + inp4_h_f_0.y); inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x + inp4_h_f_1.x); inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y + inp4_h_f_1.y); float2 val_f; __half2* val_h = reinterpret_cast<__half2*>(&val_f); val_h[0] = __float22half2_rn(inp1_h_f_0); val_h[1] = __float22half2_rn(inp1_h_f_1); float2* out_4 = reinterpret_cast<float2*>(out); out_4[row * row_stride + id] = val_f; } template <> void launch_fused_add4<float>(float* out, const float* inp1, const float* inp2, const float* inp3, const float* inp4, int batch_size, int seq_length, int hidden_size, hipStream_t& stream) { dim3 grid_dim(batch_size * seq_length); dim3 block_dim(hidden_size / 4); hipLaunchKernelGGL(( fused_add4_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); } template <> void 
launch_fused_add4<__half>(__half* out, const __half* inp1, const __half* inp2, const __half* inp3, const __half* inp4, int batch_size, int seq_length, int hidden_size, hipStream_t& stream) { dim3 grid_dim(batch_size * seq_length); dim3 block_dim(hidden_size / 4); hipLaunchKernelGGL(( fused_add4_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); }
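// --- Illustrative note (not part of the original sources) ---
// This .hip file and the .cu file that follows differ essentially only in the
// launch syntax: hipify rewrites CUDA's triple-chevron launches into
// hipLaunchKernelGGL calls carrying the same grid/block/shared-memory/stream
// arguments. The toy kernel below (scale_sketch and launch_scale_sketch are
// made-up names for illustration) shows the two equivalent spellings.
#include <hip/hip_runtime.h>

__global__ void scale_sketch(float* x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launch_scale_sketch(float* x, float a, int n, hipStream_t stream) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // HIP form, as produced by hipify:
    hipLaunchKernelGGL(scale_sketch, grid, block, 0, stream, x, a, n);
    // CUDA form, as written in the matching .cu file:
    //   scale_sketch<<<grid, block, 0, stream>>>(x, a, n);
}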
6255ac6963992090783a9c4271ce2d2633c4e330.cu
#include "general_kernels.h" namespace cg = cooperative_groups; template <typename T> __global__ void column_sum_reduce(const T* __restrict__ inp, T* __restrict__ out, int rows, int width) { __shared__ float tile[TILE_DIM][TILE_DIM + 1]; cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b); int idx = blockDim.x * blockIdx.x + threadIdx.x; int y_stride = width * TILE_DIM; float localSum = 0; // Loop across matrix height if (idx < width) { int offset = threadIdx.y * width + idx; for (int r = threadIdx.y; r < rows; r += TILE_DIM) { localSum += (float)inp[offset]; offset += y_stride; } } tile[threadIdx.x][threadIdx.y] = localSum; __syncthreads(); // Sum the shared buffer. float sum = tile[threadIdx.y][threadIdx.x]; #ifndef __STOCHASTIC_MODE__ __syncthreads(); #endif for (int i = 1; i < TILE_DIM; i <<= 1) sum += g.shfl_down(sum, i); if (threadIdx.x == 0) { int pos = blockIdx.x * TILE_DIM + threadIdx.y; if (pos < width) out[pos] = sum; } } template <typename T> void launch_fuse_transpose_bias_kernel(const T* inp, T* out, int rows, int cols, cudaStream_t stream); template <> void launch_fuse_transpose_bias_kernel<float>(const float* inp, float* out, int rows, int cols, cudaStream_t stream) { // assert(rows % TILE_DIM == 0); // assert(cols % TILE_DIM == 0); dim3 grid_dim((cols - 1) / TILE_DIM + 1); dim3 block_dim(TILE_DIM, TILE_DIM); column_sum_reduce<float><<<grid_dim, block_dim, 0, stream>>>(inp, out, rows, cols); } template <> void launch_fuse_transpose_bias_kernel<__half>(const __half* inp, __half* out, int rows, int cols, cudaStream_t stream) { // assert(rows % TILE_DIM == 0); // assert(cols % TILE_DIM == 0); dim3 grid_dim((cols - 1) / TILE_DIM + 1); dim3 block_dim(TILE_DIM, TILE_DIM); column_sum_reduce<__half><<<grid_dim, block_dim, 0, stream>>>(inp, out, rows, cols); } __global__ void fused_add2_kernel(const int N, float* out, const float* inp1, const float* inp2) { const float4* inp1_4 = reinterpret_cast<const float4*>(inp1); const float4* inp2_4 = reinterpret_cast<const float4*>(inp2); float4* out_4 = reinterpret_cast<float4*>(out); CUDA_1D_KERNEL_LOOP(j, N) { float4 val; float4 inp1_reg = inp1_4[j]; float4 inp2_reg = inp2_4[j]; val.x = inp1_reg.x + inp2_reg.x; val.y = inp1_reg.y + inp2_reg.y; val.z = inp1_reg.z + inp2_reg.z; val.w = inp1_reg.w + inp2_reg.w; out_4[j] = val; } } __global__ void fused_add2_kernel(const int N, __half* out, const __half* inp1, const __half* inp2) { float2 inp1_4; float2 inp2_4; __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); const float2* inp1_arr = reinterpret_cast<const float2*>(inp1); const float2* inp2_arr = reinterpret_cast<const float2*>(inp2); CUDA_1D_KERNEL_LOOP(j, N) { inp1_4 = inp1_arr[j]; inp2_4 = inp2_arr[j]; float2 inp1_h_f_0 = __half22float2(inp1_h[0]); float2 inp1_h_f_1 = __half22float2(inp1_h[1]); float2 inp2_h_f_0 = __half22float2(inp2_h[0]); float2 inp2_h_f_1 = __half22float2(inp2_h[1]); inp1_h_f_0.x += inp2_h_f_0.x; inp1_h_f_0.y += inp2_h_f_0.y; inp1_h_f_1.x += inp2_h_f_1.x; inp1_h_f_1.y += inp2_h_f_1.y; float2 val_f; __half2* val_h = reinterpret_cast<__half2*>(&val_f); val_h[0] = __float22half2_rn(inp1_h_f_0); val_h[1] = __float22half2_rn(inp1_h_f_1); float2* out_4 = reinterpret_cast<float2*>(out); out_4[j] = val_f; } } template <> void launch_fused_add2<float>(float* out, const float* inp1, const float* inp2, int batch_size, int seq_length, int hidden_dim, cudaStream_t& stream) { int total_count = batch_size * 
seq_length * hidden_dim / 4; dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); fused_add2_kernel<<<grid_dim, block_dim, 0, stream>>>(total_count, out, inp1, inp2); } template <> void launch_fused_add2<__half>(__half* out, const __half* inp1, const __half* inp2, int batch_size, int seq_length, int hidden_dim, cudaStream_t& stream) { int total_count = batch_size * seq_length * hidden_dim / 4; dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); fused_add2_kernel<<<grid_dim, block_dim, 0, stream>>>(total_count, out, inp1, inp2); } __global__ void fused_add3_kernel(float* out, const float* inp1, const float* inp2, const float* inp3, int size, int row_stride) { int row = blockIdx.x; int id = threadIdx.x; const float4* inp1_4 = reinterpret_cast<const float4*>(inp1); const float4* inp2_4 = reinterpret_cast<const float4*>(inp2); const float4* inp3_4 = reinterpret_cast<const float4*>(inp3); float4* out_4 = reinterpret_cast<float4*>(out); float4 val; float4 inp1_reg = inp1_4[row * row_stride + id]; float4 inp2_reg = inp2_4[row * row_stride + id]; float4 inp3_reg = inp3_4[row * row_stride + id]; val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x; val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y; val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z; val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w; out_4[row * row_stride + id] = val; } __global__ void fused_add3_kernel(__half* out, const __half* inp1, const __half* inp2, const __half* inp3, int size, int row_stride) { int row = blockIdx.x; int id = threadIdx.x; const float2* inp1_arr = reinterpret_cast<const float2*>(inp1); const float2* inp2_arr = reinterpret_cast<const float2*>(inp2); const float2* inp3_arr = reinterpret_cast<const float2*>(inp3); float2 inp1_4 = inp1_arr[row * row_stride + id]; float2 inp2_4 = inp2_arr[row * row_stride + id]; float2 inp3_4 = inp3_arr[row * row_stride + id]; __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); float2 inp1_h_f_0 = __half22float2(inp1_h[0]); float2 inp1_h_f_1 = __half22float2(inp1_h[1]); float2 inp2_h_f_0 = __half22float2(inp2_h[0]); float2 inp2_h_f_1 = __half22float2(inp2_h[1]); float2 inp3_h_f_0 = __half22float2(inp3_h[0]); float2 inp3_h_f_1 = __half22float2(inp3_h[1]); inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x); inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y); inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x); inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y); float2 val_f; __half2* val_h = reinterpret_cast<__half2*>(&val_f); val_h[0] = __float22half2_rn(inp1_h_f_0); val_h[1] = __float22half2_rn(inp1_h_f_1); float2* out_4 = reinterpret_cast<float2*>(out); out_4[row * row_stride + id] = val_f; } template <> void launch_fused_add3<float>(float* out, const float* inp1, const float* inp2, const float* inp3, int batch_size, int seq_length, int hidden_size, cudaStream_t& stream) { dim3 grid_dim(batch_size * seq_length); dim3 block_dim(hidden_size / 4); fused_add3_kernel<<<grid_dim, block_dim, 0, stream>>>( out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); } template <> void launch_fused_add3<__half>(__half* out, const __half* inp1, const __half* inp2, const __half* inp3, int batch_size, int seq_length, int hidden_size, cudaStream_t& stream) { dim3 grid_dim(batch_size * seq_length); dim3 block_dim(hidden_size / 4); 
fused_add3_kernel<<<grid_dim, block_dim, 0, stream>>>( out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); } __global__ void fused_add4_kernel(float* out, const float* inp1, const float* inp2, const float* inp3, const float* inp4, int size, int row_stride) { int row = blockIdx.x; int id = threadIdx.x; const float4* inp1_4 = reinterpret_cast<const float4*>(inp1); const float4* inp2_4 = reinterpret_cast<const float4*>(inp2); const float4* inp3_4 = reinterpret_cast<const float4*>(inp3); const float4* inp4_4 = reinterpret_cast<const float4*>(inp4); float4* out_4 = reinterpret_cast<float4*>(out); float4 val; float4 inp1_reg = inp1_4[row * row_stride + id]; float4 inp2_reg = inp2_4[row * row_stride + id]; float4 inp3_reg = inp3_4[row * row_stride + id]; float4 inp4_reg = inp4_4[row * row_stride + id]; val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x + inp4_reg.x; val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y + inp4_reg.y; val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z + inp4_reg.z; val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w + inp4_reg.w; out_4[row * row_stride + id] = val; } __global__ void fused_add4_kernel(__half* out, const __half* inp1, const __half* inp2, const __half* inp3, const __half* inp4, int size, int row_stride) { int row = blockIdx.x; int id = threadIdx.x; const float2* inp1_arr = reinterpret_cast<const float2*>(inp1); const float2* inp2_arr = reinterpret_cast<const float2*>(inp2); const float2* inp3_arr = reinterpret_cast<const float2*>(inp3); const float2* inp4_arr = reinterpret_cast<const float2*>(inp4); float2 inp1_4 = inp1_arr[row * row_stride + id]; float2 inp2_4 = inp2_arr[row * row_stride + id]; float2 inp3_4 = inp3_arr[row * row_stride + id]; float2 inp4_4 = inp4_arr[row * row_stride + id]; __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); __half2* inp4_h = reinterpret_cast<__half2*>(&inp4_4); float2 inp1_h_f_0 = __half22float2(inp1_h[0]); float2 inp1_h_f_1 = __half22float2(inp1_h[1]); float2 inp2_h_f_0 = __half22float2(inp2_h[0]); float2 inp2_h_f_1 = __half22float2(inp2_h[1]); float2 inp3_h_f_0 = __half22float2(inp3_h[0]); float2 inp3_h_f_1 = __half22float2(inp3_h[1]); float2 inp4_h_f_0 = __half22float2(inp4_h[0]); float2 inp4_h_f_1 = __half22float2(inp4_h[1]); inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x + inp4_h_f_0.x); inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y + inp4_h_f_0.y); inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x + inp4_h_f_1.x); inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y + inp4_h_f_1.y); float2 val_f; __half2* val_h = reinterpret_cast<__half2*>(&val_f); val_h[0] = __float22half2_rn(inp1_h_f_0); val_h[1] = __float22half2_rn(inp1_h_f_1); float2* out_4 = reinterpret_cast<float2*>(out); out_4[row * row_stride + id] = val_f; } template <> void launch_fused_add4<float>(float* out, const float* inp1, const float* inp2, const float* inp3, const float* inp4, int batch_size, int seq_length, int hidden_size, cudaStream_t& stream) { dim3 grid_dim(batch_size * seq_length); dim3 block_dim(hidden_size / 4); fused_add4_kernel<<<grid_dim, block_dim, 0, stream>>>( out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); } template <> void launch_fused_add4<__half>(__half* out, const __half* inp1, const __half* inp2, const __half* inp3, const __half* inp4, int batch_size, int seq_length, int hidden_size, cudaStream_t& stream) { dim3 grid_dim(batch_size * seq_length); dim3 block_dim(hidden_size / 4); 
fused_add4_kernel<<<grid_dim, block_dim, 0, stream>>>( out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); }
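// --- Illustrative appendix (not part of the original sources) ---
// The fused_add kernels above use vectorized float4 (and __half2-via-float2)
// accesses so each thread moves 16 bytes per load/store. The standalone kernel
// below is a minimal sketch of that pattern with an explicit grid-stride loop
// in place of the CUDA_1D_KERNEL_LOOP / DS_GET_BLOCKS macros from
// general_kernels.h (not shown here). It assumes 16-byte-aligned buffers and a
// float count that is a multiple of 4; all names here are illustrative.
#include <cuda_runtime.h>

// N4 = number of float4 elements, i.e. total floats / 4.
__global__ void add2_float4_sketch(int N4, float* out, const float* a, const float* b) {
    const float4* a4 = reinterpret_cast<const float4*>(a);
    const float4* b4 = reinterpret_cast<const float4*>(b);
    float4* o4 = reinterpret_cast<float4*>(out);
    for (int j = blockIdx.x * blockDim.x + threadIdx.x; j < N4;
         j += gridDim.x * blockDim.x) {  // grid-stride loop
        float4 va = a4[j];
        float4 vb = b4[j];
        float4 v;
        v.x = va.x + vb.x;
        v.y = va.y + vb.y;
        v.z = va.z + vb.z;
        v.w = va.w + vb.w;
        o4[j] = v;
    }
}

// Illustrative launch: 512 threads per block, enough blocks to cover N4.
void launch_add2_float4_sketch(float* out, const float* a, const float* b,
                               int n_floats, cudaStream_t stream) {
    int N4 = n_floats / 4;
    int block = 512;
    int grid = (N4 + block - 1) / block;
    add2_float4_sketch<<<grid, block, 0, stream>>>(N4, out, a, b);
}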
29edc5c038a085da1934b5f527c20df69fa73f64.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file multibox_detection.cu * \brief MultiBoxDetection op * \author Joshua Zhang */ #include "./multibox_detection-inl.h" #include <mshadow/cuda/tensor_gpu-inl.cuh> #define MULTIBOX_DETECTION_CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { template<typename DType> __device__ void Clip(DType *value, const DType lower, const DType upper) { if ((*value) < lower) *value = lower; if ((*value) > upper) *value = upper; } template<typename DType> __device__ void CalculateOverlap(const DType *a, const DType *b, DType *iou) { DType w = max(DType(0), min(a[2], b[2]) - max(a[0], b[0])); DType h = max(DType(0), min(a[3], b[3]) - max(a[1], b[1])); DType i = w * h; DType u = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - i; (*iou) = u <= 0.f ? static_cast<DType>(0) : static_cast<DType>(i / u); } template<typename DType> __global__ void DetectionForwardKernel(DType *out, const DType *cls_prob, const DType *loc_pred, const DType *anchors, DType *temp_space, const int num_classes, const int num_anchors, const float threshold, const bool clip, const float vx, const float vy, const float vw, const float vh, const float nms_threshold, const bool force_suppress, const int nms_topk) { const int nbatch = blockIdx.x; // each block for each batch int index = threadIdx.x; __shared__ int valid_count; out += nbatch * num_anchors * 6; cls_prob += nbatch * num_anchors * num_classes; loc_pred += nbatch * num_anchors * 4; if (index == 0) { valid_count = 0; } __syncthreads(); // apply prediction to anchors for (int i = index; i < num_anchors; i += blockDim.x) { DType score = -1; int id = 0; for (int j = 1; j < num_classes; ++j) { DType temp = cls_prob[j * num_anchors + i]; if (temp > score) { score = temp; id = j; } } if (id > 0 && score < threshold) { id = 0; } if (id > 0) { // valid class int pos = atomicAdd(&valid_count, 1); out[pos * 6] = id - 1; // restore original class id out[pos * 6 + 1] = (id == 0 ? 
DType(-1) : score); int offset = i * 4; DType al = anchors[offset]; DType at = anchors[offset + 1]; DType ar = anchors[offset + 2]; DType ab = anchors[offset + 3]; DType aw = ar - al; DType ah = ab - at; DType ax = (al + ar) / 2.f; DType ay = (at + ab) / 2.f; DType ox = loc_pred[offset] * vx * aw + ax; DType oy = loc_pred[offset + 1] * vy * ah + ay; DType ow = exp(loc_pred[offset + 2] * vw) * aw / 2; DType oh = exp(loc_pred[offset + 3] * vh) * ah / 2; DType xmin = ox - ow; DType ymin = oy - oh; DType xmax = ox + ow; DType ymax = oy + oh; if (clip) { Clip(&xmin, DType(0), DType(1)); Clip(&ymin, DType(0), DType(1)); Clip(&xmax, DType(0), DType(1)); Clip(&ymax, DType(0), DType(1)); } out[pos * 6 + 2] = xmin; out[pos * 6 + 3] = ymin; out[pos * 6 + 4] = xmax; out[pos * 6 + 5] = ymax; } } __syncthreads(); if (valid_count < 1 || nms_threshold <= 0 || nms_threshold > 1) return; // if (index == 0) printf("%d\n", valid_count); // descent sort according to scores const int size = valid_count; temp_space += nbatch * num_anchors * 6; DType *src = out; DType *dst = temp_space; for (int width = 2; width < (size << 1); width <<= 1) { int slices = (size - 1) / (blockDim.x * width) + 1; int start = width * index * slices; for (int slice = 0; slice < slices; ++slice) { if (start >= size) break; int middle = start + (width >> 1); if (middle > size) middle = size; int end = start + width; if (end > size) end = size; int i = start; int j = middle; for (int k = start; k < end; ++k) { DType score_i = i < size ? src[i * 6 + 1] : DType(-1); DType score_j = j < size ? src[j * 6 + 1] : DType(-1); if (i < middle && (j >= end || score_i > score_j)) { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[i * 6 + n]; } ++i; } else { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[j * 6 + n]; } ++j; } } start += width; } __syncthreads(); src = src == out? temp_space : out; dst = dst == out? 
temp_space : out; } __syncthreads(); if (src == temp_space) { // copy from temp to out for (int i = index; i < size * 6; i += blockDim.x) { out[i] = temp_space[i]; } __syncthreads(); } // keep top k detections int ntop = size; if (nms_topk > 0 && nms_topk < ntop) { ntop = nms_topk; for (int i = ntop + index; i < size; i += blockDim.x) { out[i * 6] = -1; } __syncthreads(); } // apply NMS for (int compare_pos = 0; compare_pos < ntop; ++compare_pos) { DType compare_id = out[compare_pos * 6]; if (compare_id < 0) continue; // not a valid positive detection, skip DType *compare_loc_ptr = out + compare_pos * 6 + 2; for (int i = compare_pos + index + 1; i < ntop; i += blockDim.x) { DType class_id = out[i * 6]; if (class_id < 0) continue; if (force_suppress || (class_id == compare_id)) { DType iou; CalculateOverlap(compare_loc_ptr, out + i * 6 + 2, &iou); if (iou >= nms_threshold) { out[i * 6] = -1; } } } __syncthreads(); } } } // namespace cuda template<typename DType> inline void MultiBoxDetectionForward(const Tensor<gpu, 3, DType> &out, const Tensor<gpu, 3, DType> &cls_prob, const Tensor<gpu, 2, DType> &loc_pred, const Tensor<gpu, 2, DType> &anchors, const Tensor<gpu, 3, DType> &temp_space, const float threshold, const bool clip, const nnvm::Tuple<float> &variances, const float nms_threshold, const bool force_suppress, const int nms_topk) { CHECK_EQ(variances.ndim(), 4) << "Variance size must be 4"; const int num_classes = cls_prob.size(1); const int num_anchors = cls_prob.size(2); const int num_batches = cls_prob.size(0); const int num_threads = cuda::kMaxThreadsPerBlock; int num_blocks = num_batches; cuda::CheckLaunchParam(num_blocks, num_threads, "MultiBoxDetection Forward"); hipStream_t stream = Stream<gpu>::GetStream(out.stream_); hipLaunchKernelGGL(( cuda::DetectionForwardKernel), dim3(num_blocks), dim3(num_threads), 0, stream, out.dptr_, cls_prob.dptr_, loc_pred.dptr_, anchors.dptr_, temp_space.dptr_, num_classes, num_anchors, threshold, clip, variances[0], variances[1], variances[2], variances[3], nms_threshold, force_suppress, nms_topk); MULTIBOX_DETECTION_CUDA_CHECK(hipPeekAtLastError()); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator *CreateOp<gpu>(MultiBoxDetectionParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new MultiBoxDetectionOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
29edc5c038a085da1934b5f527c20df69fa73f64.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file multibox_detection.cu * \brief MultiBoxDetection op * \author Joshua Zhang */ #include "./multibox_detection-inl.h" #include <mshadow/cuda/tensor_gpu-inl.cuh> #define MULTIBOX_DETECTION_CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { template<typename DType> __device__ void Clip(DType *value, const DType lower, const DType upper) { if ((*value) < lower) *value = lower; if ((*value) > upper) *value = upper; } template<typename DType> __device__ void CalculateOverlap(const DType *a, const DType *b, DType *iou) { DType w = max(DType(0), min(a[2], b[2]) - max(a[0], b[0])); DType h = max(DType(0), min(a[3], b[3]) - max(a[1], b[1])); DType i = w * h; DType u = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - i; (*iou) = u <= 0.f ? static_cast<DType>(0) : static_cast<DType>(i / u); } template<typename DType> __global__ void DetectionForwardKernel(DType *out, const DType *cls_prob, const DType *loc_pred, const DType *anchors, DType *temp_space, const int num_classes, const int num_anchors, const float threshold, const bool clip, const float vx, const float vy, const float vw, const float vh, const float nms_threshold, const bool force_suppress, const int nms_topk) { const int nbatch = blockIdx.x; // each block for each batch int index = threadIdx.x; __shared__ int valid_count; out += nbatch * num_anchors * 6; cls_prob += nbatch * num_anchors * num_classes; loc_pred += nbatch * num_anchors * 4; if (index == 0) { valid_count = 0; } __syncthreads(); // apply prediction to anchors for (int i = index; i < num_anchors; i += blockDim.x) { DType score = -1; int id = 0; for (int j = 1; j < num_classes; ++j) { DType temp = cls_prob[j * num_anchors + i]; if (temp > score) { score = temp; id = j; } } if (id > 0 && score < threshold) { id = 0; } if (id > 0) { // valid class int pos = atomicAdd(&valid_count, 1); out[pos * 6] = id - 1; // restore original class id out[pos * 6 + 1] = (id == 0 ? 
DType(-1) : score); int offset = i * 4; DType al = anchors[offset]; DType at = anchors[offset + 1]; DType ar = anchors[offset + 2]; DType ab = anchors[offset + 3]; DType aw = ar - al; DType ah = ab - at; DType ax = (al + ar) / 2.f; DType ay = (at + ab) / 2.f; DType ox = loc_pred[offset] * vx * aw + ax; DType oy = loc_pred[offset + 1] * vy * ah + ay; DType ow = exp(loc_pred[offset + 2] * vw) * aw / 2; DType oh = exp(loc_pred[offset + 3] * vh) * ah / 2; DType xmin = ox - ow; DType ymin = oy - oh; DType xmax = ox + ow; DType ymax = oy + oh; if (clip) { Clip(&xmin, DType(0), DType(1)); Clip(&ymin, DType(0), DType(1)); Clip(&xmax, DType(0), DType(1)); Clip(&ymax, DType(0), DType(1)); } out[pos * 6 + 2] = xmin; out[pos * 6 + 3] = ymin; out[pos * 6 + 4] = xmax; out[pos * 6 + 5] = ymax; } } __syncthreads(); if (valid_count < 1 || nms_threshold <= 0 || nms_threshold > 1) return; // if (index == 0) printf("%d\n", valid_count); // descent sort according to scores const int size = valid_count; temp_space += nbatch * num_anchors * 6; DType *src = out; DType *dst = temp_space; for (int width = 2; width < (size << 1); width <<= 1) { int slices = (size - 1) / (blockDim.x * width) + 1; int start = width * index * slices; for (int slice = 0; slice < slices; ++slice) { if (start >= size) break; int middle = start + (width >> 1); if (middle > size) middle = size; int end = start + width; if (end > size) end = size; int i = start; int j = middle; for (int k = start; k < end; ++k) { DType score_i = i < size ? src[i * 6 + 1] : DType(-1); DType score_j = j < size ? src[j * 6 + 1] : DType(-1); if (i < middle && (j >= end || score_i > score_j)) { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[i * 6 + n]; } ++i; } else { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[j * 6 + n]; } ++j; } } start += width; } __syncthreads(); src = src == out? temp_space : out; dst = dst == out? 
temp_space : out; } __syncthreads(); if (src == temp_space) { // copy from temp to out for (int i = index; i < size * 6; i += blockDim.x) { out[i] = temp_space[i]; } __syncthreads(); } // keep top k detections int ntop = size; if (nms_topk > 0 && nms_topk < ntop) { ntop = nms_topk; for (int i = ntop + index; i < size; i += blockDim.x) { out[i * 6] = -1; } __syncthreads(); } // apply NMS for (int compare_pos = 0; compare_pos < ntop; ++compare_pos) { DType compare_id = out[compare_pos * 6]; if (compare_id < 0) continue; // not a valid positive detection, skip DType *compare_loc_ptr = out + compare_pos * 6 + 2; for (int i = compare_pos + index + 1; i < ntop; i += blockDim.x) { DType class_id = out[i * 6]; if (class_id < 0) continue; if (force_suppress || (class_id == compare_id)) { DType iou; CalculateOverlap(compare_loc_ptr, out + i * 6 + 2, &iou); if (iou >= nms_threshold) { out[i * 6] = -1; } } } __syncthreads(); } } } // namespace cuda template<typename DType> inline void MultiBoxDetectionForward(const Tensor<gpu, 3, DType> &out, const Tensor<gpu, 3, DType> &cls_prob, const Tensor<gpu, 2, DType> &loc_pred, const Tensor<gpu, 2, DType> &anchors, const Tensor<gpu, 3, DType> &temp_space, const float threshold, const bool clip, const nnvm::Tuple<float> &variances, const float nms_threshold, const bool force_suppress, const int nms_topk) { CHECK_EQ(variances.ndim(), 4) << "Variance size must be 4"; const int num_classes = cls_prob.size(1); const int num_anchors = cls_prob.size(2); const int num_batches = cls_prob.size(0); const int num_threads = cuda::kMaxThreadsPerBlock; int num_blocks = num_batches; cuda::CheckLaunchParam(num_blocks, num_threads, "MultiBoxDetection Forward"); cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); cuda::DetectionForwardKernel<<<num_blocks, num_threads, 0, stream>>>(out.dptr_, cls_prob.dptr_, loc_pred.dptr_, anchors.dptr_, temp_space.dptr_, num_classes, num_anchors, threshold, clip, variances[0], variances[1], variances[2], variances[3], nms_threshold, force_suppress, nms_topk); MULTIBOX_DETECTION_CUDA_CHECK(cudaPeekAtLastError()); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator *CreateOp<gpu>(MultiBoxDetectionParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new MultiBoxDetectionOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
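// --- Illustrative appendix (not part of the original sources) ---
// The NMS loop in DetectionForwardKernel suppresses boxes through
// CalculateOverlap, which computes intersection-over-union for boxes stored as
// {xmin, ymin, xmax, ymax}. The host-side sketch below (the name iou and the
// tiny main() are illustrative only) reproduces that arithmetic with a small
// worked example so the suppression threshold is easy to sanity-check.
#include <cstdio>
#include <algorithm>

static float iou(const float* a, const float* b) {
    float w = std::max(0.f, std::min(a[2], b[2]) - std::max(a[0], b[0]));
    float h = std::max(0.f, std::min(a[3], b[3]) - std::max(a[1], b[1]));
    float i = w * h;
    float u = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - i;
    return u <= 0.f ? 0.f : i / u;
}

int main() {
    // Two unit squares offset by 0.5 in x: intersection 0.5, union 1.5, IoU 1/3.
    float a[4] = {0.f, 0.f, 1.f, 1.f};
    float b[4] = {0.5f, 0.f, 1.5f, 1.f};
    printf("IoU = %f\n", iou(a, b)); // ~0.333; suppressed only when nms_threshold <= 1/3
    return 0;
}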
bad9c2d00cfe6815d7e27fc7079916e53f7de58f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>

// Final warp-level reduction: relies on the remaining 32 threads running in
// lock step, so the shared-memory pointer is declared volatile.
__device__ void warpReduce(volatile int* sdata, int tid) {
    sdata[tid] += sdata[tid + 32];
    sdata[tid] += sdata[tid + 16];
    sdata[tid] += sdata[tid + 8];
    sdata[tid] += sdata[tid + 4];
    sdata[tid] += sdata[tid + 2];
    sdata[tid] += sdata[tid + 1];
}

__global__ void reduce2(int *g_idata, int *g_odata) {
    extern __shared__ int sdata[];

    // Each thread loads one element from global into shared memory.
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();

    // Tree reduction in shared memory down to the last warp.
    for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    if (tid < 32) warpReduce(sdata, tid);
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

int main(void) {
    int N = 1 << 28;
    int *input, *output;
    hipMallocManaged(&input, N * sizeof(int));
    hipMallocManaged(&output, N * sizeof(int));

    for (int i = 0; i < N; i++) {
        input[i] = 2;
        output[i] = 0;
    }

    int blockSize = 128;
    int numBlocks = (N + blockSize - 1) / blockSize;
    int smemSize = blockSize * sizeof(int);
    hipLaunchKernelGGL(( reduce2), dim3(numBlocks), dim3(blockSize), smemSize, 0, input, output);
    hipDeviceSynchronize();

    // Sum the per-block partial results on the host.
    int final_result = 0;
    for (int i = 0; i < numBlocks; i++) {
        final_result += output[i];
    }
    std::cout << "final result = " << final_result << "\n";

    // Free memory
    hipFree(input);
    hipFree(output);
    return 0;
}
bad9c2d00cfe6815d7e27fc7079916e53f7de58f.cu
#include <iostream>
#include <math.h>

// Final warp-level reduction: relies on the remaining 32 threads running in
// lock step, so the shared-memory pointer is declared volatile.
__device__ void warpReduce(volatile int* sdata, int tid) {
    sdata[tid] += sdata[tid + 32];
    sdata[tid] += sdata[tid + 16];
    sdata[tid] += sdata[tid + 8];
    sdata[tid] += sdata[tid + 4];
    sdata[tid] += sdata[tid + 2];
    sdata[tid] += sdata[tid + 1];
}

__global__ void reduce2(int *g_idata, int *g_odata) {
    extern __shared__ int sdata[];

    // Each thread loads one element from global into shared memory.
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();

    // Tree reduction in shared memory down to the last warp.
    for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    if (tid < 32) warpReduce(sdata, tid);
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

int main(void) {
    int N = 1 << 28;
    int *input, *output;
    cudaMallocManaged(&input, N * sizeof(int));
    cudaMallocManaged(&output, N * sizeof(int));

    for (int i = 0; i < N; i++) {
        input[i] = 2;
        output[i] = 0;
    }

    int blockSize = 128;
    int numBlocks = (N + blockSize - 1) / blockSize;
    int smemSize = blockSize * sizeof(int);
    reduce2<<<numBlocks, blockSize, smemSize>>>(input, output);
    cudaDeviceSynchronize();

    // Sum the per-block partial results on the host.
    int final_result = 0;
    for (int i = 0; i < numBlocks; i++) {
        final_result += output[i];
    }
    std::cout << "final result = " << final_result << "\n";

    // Free memory
    cudaFree(input);
    cudaFree(output);
    return 0;
}
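// --- Illustrative note (not part of the original file) ---
// warpReduce above relies on the classic warp-synchronous trick: once the
// active threads fit inside one warp, the volatile shared-memory updates are
// assumed to stay in lock step. On Volta and newer GPUs (independent thread
// scheduling) that assumption needs explicit __syncwarp() calls, or the
// shuffle-based form sketched below. warpReduceSumSketch is an illustrative
// alternative, not part of the original reduction.
#include <cuda_runtime.h>

// Each lane passes in its partial sum; after the loop lane 0 holds the warp total.
__inline__ __device__ int warpReduceSumSketch(int val) {
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;
}

// How the tail of reduce2 could use it instead of warpReduce() (assumes
// blockDim.x >= 64, which holds for the blockSize of 128 used in main):
//   if (tid < 32) {
//       int val = sdata[tid] + sdata[tid + 32];  // fold 64 partials down to 32
//       val = warpReduceSumSketch(val);
//       if (tid == 0) g_odata[blockIdx.x] = val;
//   }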
b9c45be4b31cd992b1dccb70d799bd28228fae1b.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <cstdlib> #include <hip/hip_runtime.h> #include <tune_quda.h> #include <gauge_field.h> #include <cassert> #include <jitify_helper.cuh> #include <kernels/clover_deriv.cuh> /** @file clover_deriv_quda.cu @brief This kernel has been a bit of a pain to optimize since it is excessively register bound. To reduce register pressure we use shared memory to help offload some of this pressure. Annoyingly, the optimal approach for CUDA 8.0 is not the same as CUDA 7.5, so implementation is compiler version dependent. The CUDA 8.0 optimal code runs 10x slower on 7.5, though the 7.5 code runs fine on 8.0. CUDA >= 8.0 - Used shared memory for force accumulator matrix - Template mu / nu to prevent register spilling of indexing arrays - Force the computation routine to inline CUDA <= 7.5 - Used shared memory for force accumulator matrix - Keep mu/nu dynamic and use shared memory to store indexing arrays - Do not inline computation routine For the shared-memory dynamic indexing arrays, we use chars, since the array is 4-d, a 4-d coordinate can be stored in a single word which means that we will not have to worry about bank conflicts, and the shared array can be passed to the usual indexing routines (getCoordsExtended and linkIndexShift) with no code changes. This strategy works as long as each local lattice coordinate is less than 256. */ namespace quda { #ifdef GPU_CLOVER_DIRAC template<typename Float, typename Arg> class CloverDerivative : public TunableVectorY { private: Arg arg; const GaugeField &meta; #if defined(SHARED_ACCUMULATOR) && defined(SHARED_ARRAY) unsigned int sharedBytesPerThread() const { return 18*sizeof(Float) + 8; } #elif defined(SHARED_ACCUMULATOR) unsigned int sharedBytesPerThread() const { return 18*sizeof(Float); } #else unsigned int sharedBytesPerThread() const { return 0; } #endif unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; } unsigned int minThreads() const { return arg.volumeCB; } bool tuneGridDim() const { return false; } // don't tune the grid dimension public: CloverDerivative(const Arg &arg, const GaugeField &meta) : TunableVectorY(2), arg(arg), meta(meta) { writeAuxString("threads=%d,prec=%lu,fstride=%d,gstride=%d,ostride=%d", arg.volumeCB,sizeof(Float),arg.force.stride, arg.gauge.stride,arg.oprod.stride); #ifdef JITIFY create_jitify_program("kernels/clover_deriv.cuh"); #endif } virtual ~CloverDerivative() {} void apply(const hipStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef JITIFY using namespace jitify::reflection; jitify_error = program->kernel("quda::cloverDerivativeKernel") .instantiate(Type<Float>(), Type<Arg>()) .configure(tp.grid, tp.block, tp.shared_bytes, stream) .launch(arg); #else hipLaunchKernelGGL(( cloverDerivativeKernel<Float>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, 0, arg); #endif } // apply bool advanceBlockDim(TuneParam &param) const { dim3 block = param.block; dim3 grid = param.grid; bool rtn = TunableVectorY::advanceBlockDim(param); param.block.z = block.z; param.grid.z = grid.z; if (!rtn) { if (param.block.z < 4) { param.block.z++; param.grid.z = (4 + param.block.z - 1) / param.block.z; rtn = true; } else { param.block.z = 1; param.grid.z = 4; rtn = false; } } return rtn; } void initTuneParam(TuneParam &param) const { TunableVectorY::initTuneParam(param); param.block.y = 1; param.block.z = 1; param.grid.y = 2; param.grid.z = 4; } void defaultTuneParam(TuneParam &param) const { 
initTuneParam(param); } // The force field is updated so we must preserve its initial state void preTune() { arg.force.save(); } void postTune() { arg.force.load(); } long long flops() const { return 16 * 198 * 3 * 4 * 2 * (long long)arg.volumeCB; } long long bytes() const { return ((8*arg.gauge.Bytes() + 4*arg.oprod.Bytes())*3 + 2*arg.force.Bytes()) * 4 * 2 * arg.volumeCB; } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } }; template<typename Float> void cloverDerivative(cudaGaugeField &force, cudaGaugeField &gauge, cudaGaugeField &oprod, double coeff, int parity) { if (oprod.Reconstruct() != QUDA_RECONSTRUCT_NO) errorQuda("Force field does not support reconstruction"); if (force.Order() != oprod.Order()) errorQuda("Force and Oprod orders must match"); if (force.Reconstruct() != QUDA_RECONSTRUCT_NO) errorQuda("Force field does not support reconstruction"); if (force.Order() == QUDA_FLOAT2_GAUGE_ORDER) { typedef gauge::FloatNOrder<Float, 18, 2, 18> F; typedef gauge::FloatNOrder<Float, 18, 2, 18> O; if (gauge.isNative()) { if (gauge.Reconstruct() == QUDA_RECONSTRUCT_NO) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; typedef CloverDerivArg<Float,F,G,O> Arg; Arg arg(F(force), G(gauge), O(oprod), force.X(), oprod.X(), coeff, parity); CloverDerivative<Float, Arg> deriv(arg, gauge); deriv.apply(0); #if 0 } else if (gauge.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G; typedef CloverDerivArg<Float,F,G,O> Arg; Arg arg(F(force), G(gauge), O(oprod), force.X(), oprod.X(), coeff, parity); CloverDerivative<Float, Arg> deriv(arg, gauge); deriv.apply(0); #endif } else { errorQuda("Reconstruction type %d not supported",gauge.Reconstruct()); } } else { errorQuda("Gauge order %d not supported", gauge.Order()); } } else { errorQuda("Force order %d not supported", force.Order()); } // force / oprod order qudaDeviceSynchronize(); } #endif // GPU_CLOVER void cloverDerivative( cudaGaugeField &force, cudaGaugeField &gauge, cudaGaugeField &oprod, double coeff, QudaParity parity) { #ifdef GPU_CLOVER_DIRAC assert(oprod.Geometry() == QUDA_TENSOR_GEOMETRY); assert(force.Geometry() == QUDA_VECTOR_GEOMETRY); for (int d=0; d<4; d++) { if (oprod.X()[d] != gauge.X()[d]) errorQuda("Incompatible extended dimensions d=%d gauge=%d oprod=%d", d, gauge.X()[d], oprod.X()[d]); } int device_parity = (parity == QUDA_EVEN_PARITY) ? 0 : 1; if(force.Precision() == QUDA_DOUBLE_PRECISION){ cloverDerivative<double>(force, gauge, oprod, coeff, device_parity); #if 0 } else if (force.Precision() == QUDA_SINGLE_PRECISION){ cloverDerivative<float>(force, gauge, oprod, coeff, device_parity); #endif } else { errorQuda("Precision %d not supported", force.Precision()); } return; #else errorQuda("Clover has not been built"); #endif } } // namespace quda
b9c45be4b31cd992b1dccb70d799bd28228fae1b.cu
#include <cstdio> #include <cstdlib> #include <cuda.h> #include <tune_quda.h> #include <gauge_field.h> #include <cassert> #include <jitify_helper.cuh> #include <kernels/clover_deriv.cuh> /** @file clover_deriv_quda.cu @brief This kernel has been a bit of a pain to optimize since it is excessively register bound. To reduce register pressure we use shared memory to help offload some of this pressure. Annoyingly, the optimal approach for CUDA 8.0 is not the same as CUDA 7.5, so implementation is compiler version dependent. The CUDA 8.0 optimal code runs 10x slower on 7.5, though the 7.5 code runs fine on 8.0. CUDA >= 8.0 - Used shared memory for force accumulator matrix - Template mu / nu to prevent register spilling of indexing arrays - Force the computation routine to inline CUDA <= 7.5 - Used shared memory for force accumulator matrix - Keep mu/nu dynamic and use shared memory to store indexing arrays - Do not inline computation routine For the shared-memory dynamic indexing arrays, we use chars, since the array is 4-d, a 4-d coordinate can be stored in a single word which means that we will not have to worry about bank conflicts, and the shared array can be passed to the usual indexing routines (getCoordsExtended and linkIndexShift) with no code changes. This strategy works as long as each local lattice coordinate is less than 256. */ namespace quda { #ifdef GPU_CLOVER_DIRAC template<typename Float, typename Arg> class CloverDerivative : public TunableVectorY { private: Arg arg; const GaugeField &meta; #if defined(SHARED_ACCUMULATOR) && defined(SHARED_ARRAY) unsigned int sharedBytesPerThread() const { return 18*sizeof(Float) + 8; } #elif defined(SHARED_ACCUMULATOR) unsigned int sharedBytesPerThread() const { return 18*sizeof(Float); } #else unsigned int sharedBytesPerThread() const { return 0; } #endif unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; } unsigned int minThreads() const { return arg.volumeCB; } bool tuneGridDim() const { return false; } // don't tune the grid dimension public: CloverDerivative(const Arg &arg, const GaugeField &meta) : TunableVectorY(2), arg(arg), meta(meta) { writeAuxString("threads=%d,prec=%lu,fstride=%d,gstride=%d,ostride=%d", arg.volumeCB,sizeof(Float),arg.force.stride, arg.gauge.stride,arg.oprod.stride); #ifdef JITIFY create_jitify_program("kernels/clover_deriv.cuh"); #endif } virtual ~CloverDerivative() {} void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef JITIFY using namespace jitify::reflection; jitify_error = program->kernel("quda::cloverDerivativeKernel") .instantiate(Type<Float>(), Type<Arg>()) .configure(tp.grid, tp.block, tp.shared_bytes, stream) .launch(arg); #else cloverDerivativeKernel<Float><<<tp.grid,tp.block,tp.shared_bytes>>>(arg); #endif } // apply bool advanceBlockDim(TuneParam &param) const { dim3 block = param.block; dim3 grid = param.grid; bool rtn = TunableVectorY::advanceBlockDim(param); param.block.z = block.z; param.grid.z = grid.z; if (!rtn) { if (param.block.z < 4) { param.block.z++; param.grid.z = (4 + param.block.z - 1) / param.block.z; rtn = true; } else { param.block.z = 1; param.grid.z = 4; rtn = false; } } return rtn; } void initTuneParam(TuneParam &param) const { TunableVectorY::initTuneParam(param); param.block.y = 1; param.block.z = 1; param.grid.y = 2; param.grid.z = 4; } void defaultTuneParam(TuneParam &param) const { initTuneParam(param); } // The force field is updated so we must preserve its initial state void preTune() { 
arg.force.save(); } void postTune() { arg.force.load(); } long long flops() const { return 16 * 198 * 3 * 4 * 2 * (long long)arg.volumeCB; } long long bytes() const { return ((8*arg.gauge.Bytes() + 4*arg.oprod.Bytes())*3 + 2*arg.force.Bytes()) * 4 * 2 * arg.volumeCB; } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } }; template<typename Float> void cloverDerivative(cudaGaugeField &force, cudaGaugeField &gauge, cudaGaugeField &oprod, double coeff, int parity) { if (oprod.Reconstruct() != QUDA_RECONSTRUCT_NO) errorQuda("Force field does not support reconstruction"); if (force.Order() != oprod.Order()) errorQuda("Force and Oprod orders must match"); if (force.Reconstruct() != QUDA_RECONSTRUCT_NO) errorQuda("Force field does not support reconstruction"); if (force.Order() == QUDA_FLOAT2_GAUGE_ORDER) { typedef gauge::FloatNOrder<Float, 18, 2, 18> F; typedef gauge::FloatNOrder<Float, 18, 2, 18> O; if (gauge.isNative()) { if (gauge.Reconstruct() == QUDA_RECONSTRUCT_NO) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; typedef CloverDerivArg<Float,F,G,O> Arg; Arg arg(F(force), G(gauge), O(oprod), force.X(), oprod.X(), coeff, parity); CloverDerivative<Float, Arg> deriv(arg, gauge); deriv.apply(0); #if 0 } else if (gauge.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G; typedef CloverDerivArg<Float,F,G,O> Arg; Arg arg(F(force), G(gauge), O(oprod), force.X(), oprod.X(), coeff, parity); CloverDerivative<Float, Arg> deriv(arg, gauge); deriv.apply(0); #endif } else { errorQuda("Reconstruction type %d not supported",gauge.Reconstruct()); } } else { errorQuda("Gauge order %d not supported", gauge.Order()); } } else { errorQuda("Force order %d not supported", force.Order()); } // force / oprod order qudaDeviceSynchronize(); } #endif // GPU_CLOVER void cloverDerivative( cudaGaugeField &force, cudaGaugeField &gauge, cudaGaugeField &oprod, double coeff, QudaParity parity) { #ifdef GPU_CLOVER_DIRAC assert(oprod.Geometry() == QUDA_TENSOR_GEOMETRY); assert(force.Geometry() == QUDA_VECTOR_GEOMETRY); for (int d=0; d<4; d++) { if (oprod.X()[d] != gauge.X()[d]) errorQuda("Incompatible extended dimensions d=%d gauge=%d oprod=%d", d, gauge.X()[d], oprod.X()[d]); } int device_parity = (parity == QUDA_EVEN_PARITY) ? 0 : 1; if(force.Precision() == QUDA_DOUBLE_PRECISION){ cloverDerivative<double>(force, gauge, oprod, coeff, device_parity); #if 0 } else if (force.Precision() == QUDA_SINGLE_PRECISION){ cloverDerivative<float>(force, gauge, oprod, coeff, device_parity); #endif } else { errorQuda("Precision %d not supported", force.Precision()); } return; #else errorQuda("Clover has not been built"); #endif } } // namespace quda
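Apart from the hipify banner, the runtime header, and the cudaStream_t -> hipStream_t type change, the substantive difference between the .cu and .hip versions above is the kernel launch: hipify rewrites the <<<grid, block, shared>>> chevrons into the hipLaunchKernelGGL macro and inserts the default stream (0) explicitly. A minimal sketch of that translation, using a hypothetical scaleKernel rather than QUDA's cloverDerivativeKernel:

#include <hip/hip_runtime.h>   // the CUDA original would include <cuda_runtime.h> instead

template <typename T>
__global__ void scaleKernel(T *x, T a, int n)   // toy kernel, not part of QUDA
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

template <typename T>
void launchScale(T *d_x, T a, int n, dim3 grid, dim3 block, size_t shared, hipStream_t stream)
{
    // CUDA source form (what hipify starts from):
    //   scaleKernel<T><<<grid, block, shared, stream>>>(d_x, a, n);
    // HIP form emitted by hipify: the templated kernel name is wrapped in an
    // extra pair of parentheses so the macro receives it as a single argument.
    hipLaunchKernelGGL((scaleKernel<T>), grid, block, shared, stream, d_x, a, n);
}

In CloverDerivative::apply above the CUDA original launches on the default stream, which is why the hipified call passes 0 for the stream argument.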
8439857152630fee7efdfa6399b2d4b4c2a563c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2017 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: jglaser #include "ParticleData.cuh" /*! \file ParticleData.cu \brief ImplementsGPU kernel code and data structure functions used by ParticleData */ #ifdef ENABLE_MPI #include <iterator> #include <thrust/iterator/zip_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/scatter.h> #include <thrust/device_ptr.h> #include "hoomd/extern/kernels/scan.cuh" //! Kernel to partition particle data __global__ void gpu_scatter_particle_data_kernel( const unsigned int N, const Scalar4 *d_pos, const Scalar4 *d_vel, const Scalar3 *d_accel, const Scalar *d_charge, const Scalar *d_diameter, const int3 *d_image, const unsigned int *d_body, const Scalar4 *d_orientation, const Scalar4 *d_angmom, const Scalar3 *d_inertia, const unsigned int *d_tag, unsigned int *d_rtag, Scalar4 *d_pos_alt, Scalar4 *d_vel_alt, Scalar3 *d_accel_alt, Scalar *d_charge_alt, Scalar *d_diameter_alt, int3 *d_image_alt, unsigned int *d_body_alt, Scalar4 *d_orientation_alt, Scalar4 *d_angmom_alt, Scalar3 *d_inertia_alt, unsigned int *d_tag_alt, pdata_element *d_out, unsigned int *d_comm_flags, unsigned int *d_comm_flags_out, const unsigned int *d_scan) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= N) return; bool remove = d_comm_flags[idx]; unsigned int scan_remove = d_scan[idx]; unsigned int scan_keep = idx - scan_remove; if (remove) { pdata_element p; p.pos = d_pos[idx]; p.vel = d_vel[idx]; p.accel = d_accel[idx]; p.charge = d_charge[idx]; p.diameter = d_diameter[idx]; p.image = d_image[idx]; p.body = d_body[idx]; p.orientation = d_orientation[idx]; p.angmom = d_angmom[idx]; p.inertia = d_inertia[idx]; p.tag = d_tag[idx]; d_out[scan_remove] = p; d_comm_flags_out[scan_remove] = d_comm_flags[idx]; // reset communication flags d_comm_flags[idx] = 0; // reset rtag d_rtag[p.tag] = NOT_LOCAL; } else { d_pos_alt[scan_keep] = d_pos[idx]; d_vel_alt[scan_keep] = d_vel[idx]; d_accel_alt[scan_keep] = d_accel[idx]; d_charge_alt[scan_keep] = d_charge[idx]; d_diameter_alt[scan_keep] = d_diameter[idx]; d_image_alt[scan_keep] = d_image[idx]; d_body_alt[scan_keep] = d_body[idx]; d_orientation_alt[scan_keep] = d_orientation[idx]; d_angmom_alt[scan_keep] = d_angmom[idx]; d_inertia_alt[scan_keep] = d_inertia[idx]; unsigned int tag = d_tag[idx]; d_tag_alt[scan_keep] = tag; // update rtag d_rtag[tag] = scan_keep; } } __global__ void gpu_select_sent_particles( unsigned int N, unsigned int *d_comm_flags, unsigned int *d_tmp) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= N) return; d_tmp[idx] = d_comm_flags[idx] ? 1 : 0; } /*! 
\param N Number of local particles \param d_pos Device array of particle positions \param d_vel Device array of particle velocities \param d_accel Device array of particle accelerations \param d_charge Device array of particle charges \param d_diameter Device array of particle diameters \param d_image Device array of particle images \param d_body Device array of particle body tags \param d_orientation Device array of particle orientations \param d_angmom Device array of particle angular momenta \param d_inertia Device array of particle moments of inertia \param d_tag Device array of particle tags \param d_rtag Device array for reverse-lookup table \param d_pos_alt Device array of particle positions (output) \param d_vel_alt Device array of particle velocities (output) \param d_accel_alt Device array of particle accelerations (output) \param d_charge_alt Device array of particle charges (output) \param d_diameter_alt Device array of particle diameters (output) \param d_image_alt Device array of particle images (output) \param d_body_alt Device array of particle body tags (output) \param d_orientation_alt Device array of particle orientations (output) \param d_angmom_alt Device array of particle angular momenta (output) \param d_inertia Device array of particle moments of inertia (output) \param d_out Output array for packed particle data \param max_n_out Maximum number of elements to write to output array \returns Number of elements marked for removal */ unsigned int gpu_pdata_remove(const unsigned int N, const Scalar4 *d_pos, const Scalar4 *d_vel, const Scalar3 *d_accel, const Scalar *d_charge, const Scalar *d_diameter, const int3 *d_image, const unsigned int *d_body, const Scalar4 *d_orientation, const Scalar4 *d_angmom, const Scalar3 *d_inertia, const unsigned int *d_tag, unsigned int *d_rtag, Scalar4 *d_pos_alt, Scalar4 *d_vel_alt, Scalar3 *d_accel_alt, Scalar *d_charge_alt, Scalar *d_diameter_alt, int3 *d_image_alt, unsigned int *d_body_alt, Scalar4 *d_orientation_alt, Scalar4 *d_angmom_alt, Scalar3 *d_inertia_alt, unsigned int *d_tag_alt, pdata_element *d_out, unsigned int *d_comm_flags, unsigned int *d_comm_flags_out, unsigned int max_n_out, unsigned int *d_tmp, mgpu::ContextPtr mgpu_context) { unsigned int n_out; // partition particle data into local and removed particles unsigned int block_size =512; unsigned int n_blocks = N/block_size+1; // select nonzero communication flags hipLaunchKernelGGL(( gpu_select_sent_particles), dim3(n_blocks), dim3(block_size), 0, 0, N, d_comm_flags, d_tmp); // perform a scan over the array of ones and zeroes mgpu::Scan<mgpu::MgpuScanTypeExc>(d_tmp, N, (unsigned int) 0, mgpu::plus<unsigned int>(), (unsigned int *)NULL, &n_out, d_tmp, *mgpu_context); // Don't write past end of buffer if (n_out <= max_n_out) { // partition particle data into local and removed particles unsigned int block_size =512; unsigned int n_blocks = N/block_size+1; hipLaunchKernelGGL(( gpu_scatter_particle_data_kernel), dim3(n_blocks), dim3(block_size), 0, 0, N, d_pos, d_vel, d_accel, d_charge, d_diameter, d_image, d_body, d_orientation, d_angmom, d_inertia, d_tag, d_rtag, d_pos_alt, d_vel_alt, d_accel_alt, d_charge_alt, d_diameter_alt, d_image_alt, d_body_alt, d_orientation_alt, d_angmom_alt, d_inertia_alt, d_tag_alt, d_out, d_comm_flags, d_comm_flags_out, d_tmp); } // return elements written to output stream return n_out; } __global__ void gpu_pdata_add_particles_kernel(unsigned int old_nparticles, unsigned int num_add_ptls, Scalar4 *d_pos, Scalar4 *d_vel, Scalar3 *d_accel, 
Scalar *d_charge, Scalar *d_diameter, int3 *d_image, unsigned int *d_body, Scalar4 *d_orientation, Scalar4 *d_angmom, Scalar3 *d_inertia, unsigned int *d_tag, unsigned int *d_rtag, const pdata_element *d_in, unsigned int *d_comm_flags) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= num_add_ptls) return; pdata_element p = d_in[idx]; unsigned int add_idx = old_nparticles + idx; d_pos[add_idx] = p.pos; d_vel[add_idx] = p.vel; d_accel[add_idx] = p.accel; d_charge[add_idx] = p.charge; d_diameter[add_idx] = p.diameter; d_image[add_idx] = p.image; d_body[add_idx] = p.body; d_orientation[add_idx] = p.orientation; d_angmom[add_idx] = p.angmom; d_inertia[add_idx] = p.inertia; d_tag[add_idx] = p.tag; d_rtag[p.tag] = add_idx; d_comm_flags[add_idx] = 0; } /*! \param old_nparticles old local particle count \param num_add_ptls Number of particles in input array \param d_pos Device array of particle positions \param d_vel Device iarray of particle velocities \param d_accel Device array of particle accelerations \param d_charge Device array of particle charges \param d_diameter Device array of particle diameters \param d_image Device array of particle images \param d_body Device array of particle body tags \param d_orientation Device array of particle orientations \param d_angmom Device array of particle angular momenta \param d_inertia Device array of particle moments of inertia \param d_tag Device array of particle tags \param d_rtag Device array for reverse-lookup table \param d_in Device array of packed input particle data \param d_comm_flags Device array of communication flags (pdata) */ void gpu_pdata_add_particles(const unsigned int old_nparticles, const unsigned int num_add_ptls, Scalar4 *d_pos, Scalar4 *d_vel, Scalar3 *d_accel, Scalar *d_charge, Scalar *d_diameter, int3 *d_image, unsigned int *d_body, Scalar4 *d_orientation, Scalar4 *d_angmom, Scalar3 *d_inertia, unsigned int *d_tag, unsigned int *d_rtag, const pdata_element *d_in, unsigned int *d_comm_flags) { unsigned int block_size = 512; unsigned int n_blocks = num_add_ptls/block_size + 1; hipLaunchKernelGGL(( gpu_pdata_add_particles_kernel), dim3(n_blocks), dim3(block_size), 0, 0, old_nparticles, num_add_ptls, d_pos, d_vel, d_accel, d_charge, d_diameter, d_image, d_body, d_orientation, d_angmom, d_inertia, d_tag, d_rtag, d_in, d_comm_flags); } #endif // ENABLE_MPI
8439857152630fee7efdfa6399b2d4b4c2a563c9.cu
// Copyright (c) 2009-2017 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: jglaser #include "ParticleData.cuh" /*! \file ParticleData.cu \brief ImplementsGPU kernel code and data structure functions used by ParticleData */ #ifdef ENABLE_MPI #include <iterator> #include <thrust/iterator/zip_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/scatter.h> #include <thrust/device_ptr.h> #include "hoomd/extern/kernels/scan.cuh" //! Kernel to partition particle data __global__ void gpu_scatter_particle_data_kernel( const unsigned int N, const Scalar4 *d_pos, const Scalar4 *d_vel, const Scalar3 *d_accel, const Scalar *d_charge, const Scalar *d_diameter, const int3 *d_image, const unsigned int *d_body, const Scalar4 *d_orientation, const Scalar4 *d_angmom, const Scalar3 *d_inertia, const unsigned int *d_tag, unsigned int *d_rtag, Scalar4 *d_pos_alt, Scalar4 *d_vel_alt, Scalar3 *d_accel_alt, Scalar *d_charge_alt, Scalar *d_diameter_alt, int3 *d_image_alt, unsigned int *d_body_alt, Scalar4 *d_orientation_alt, Scalar4 *d_angmom_alt, Scalar3 *d_inertia_alt, unsigned int *d_tag_alt, pdata_element *d_out, unsigned int *d_comm_flags, unsigned int *d_comm_flags_out, const unsigned int *d_scan) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= N) return; bool remove = d_comm_flags[idx]; unsigned int scan_remove = d_scan[idx]; unsigned int scan_keep = idx - scan_remove; if (remove) { pdata_element p; p.pos = d_pos[idx]; p.vel = d_vel[idx]; p.accel = d_accel[idx]; p.charge = d_charge[idx]; p.diameter = d_diameter[idx]; p.image = d_image[idx]; p.body = d_body[idx]; p.orientation = d_orientation[idx]; p.angmom = d_angmom[idx]; p.inertia = d_inertia[idx]; p.tag = d_tag[idx]; d_out[scan_remove] = p; d_comm_flags_out[scan_remove] = d_comm_flags[idx]; // reset communication flags d_comm_flags[idx] = 0; // reset rtag d_rtag[p.tag] = NOT_LOCAL; } else { d_pos_alt[scan_keep] = d_pos[idx]; d_vel_alt[scan_keep] = d_vel[idx]; d_accel_alt[scan_keep] = d_accel[idx]; d_charge_alt[scan_keep] = d_charge[idx]; d_diameter_alt[scan_keep] = d_diameter[idx]; d_image_alt[scan_keep] = d_image[idx]; d_body_alt[scan_keep] = d_body[idx]; d_orientation_alt[scan_keep] = d_orientation[idx]; d_angmom_alt[scan_keep] = d_angmom[idx]; d_inertia_alt[scan_keep] = d_inertia[idx]; unsigned int tag = d_tag[idx]; d_tag_alt[scan_keep] = tag; // update rtag d_rtag[tag] = scan_keep; } } __global__ void gpu_select_sent_particles( unsigned int N, unsigned int *d_comm_flags, unsigned int *d_tmp) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= N) return; d_tmp[idx] = d_comm_flags[idx] ? 1 : 0; } /*! 
\param N Number of local particles \param d_pos Device array of particle positions \param d_vel Device array of particle velocities \param d_accel Device array of particle accelerations \param d_charge Device array of particle charges \param d_diameter Device array of particle diameters \param d_image Device array of particle images \param d_body Device array of particle body tags \param d_orientation Device array of particle orientations \param d_angmom Device array of particle angular momenta \param d_inertia Device array of particle moments of inertia \param d_tag Device array of particle tags \param d_rtag Device array for reverse-lookup table \param d_pos_alt Device array of particle positions (output) \param d_vel_alt Device array of particle velocities (output) \param d_accel_alt Device array of particle accelerations (output) \param d_charge_alt Device array of particle charges (output) \param d_diameter_alt Device array of particle diameters (output) \param d_image_alt Device array of particle images (output) \param d_body_alt Device array of particle body tags (output) \param d_orientation_alt Device array of particle orientations (output) \param d_angmom_alt Device array of particle angular momenta (output) \param d_inertia Device array of particle moments of inertia (output) \param d_out Output array for packed particle data \param max_n_out Maximum number of elements to write to output array \returns Number of elements marked for removal */ unsigned int gpu_pdata_remove(const unsigned int N, const Scalar4 *d_pos, const Scalar4 *d_vel, const Scalar3 *d_accel, const Scalar *d_charge, const Scalar *d_diameter, const int3 *d_image, const unsigned int *d_body, const Scalar4 *d_orientation, const Scalar4 *d_angmom, const Scalar3 *d_inertia, const unsigned int *d_tag, unsigned int *d_rtag, Scalar4 *d_pos_alt, Scalar4 *d_vel_alt, Scalar3 *d_accel_alt, Scalar *d_charge_alt, Scalar *d_diameter_alt, int3 *d_image_alt, unsigned int *d_body_alt, Scalar4 *d_orientation_alt, Scalar4 *d_angmom_alt, Scalar3 *d_inertia_alt, unsigned int *d_tag_alt, pdata_element *d_out, unsigned int *d_comm_flags, unsigned int *d_comm_flags_out, unsigned int max_n_out, unsigned int *d_tmp, mgpu::ContextPtr mgpu_context) { unsigned int n_out; // partition particle data into local and removed particles unsigned int block_size =512; unsigned int n_blocks = N/block_size+1; // select nonzero communication flags gpu_select_sent_particles<<<n_blocks, block_size>>>( N, d_comm_flags, d_tmp); // perform a scan over the array of ones and zeroes mgpu::Scan<mgpu::MgpuScanTypeExc>(d_tmp, N, (unsigned int) 0, mgpu::plus<unsigned int>(), (unsigned int *)NULL, &n_out, d_tmp, *mgpu_context); // Don't write past end of buffer if (n_out <= max_n_out) { // partition particle data into local and removed particles unsigned int block_size =512; unsigned int n_blocks = N/block_size+1; gpu_scatter_particle_data_kernel<<<n_blocks, block_size>>>( N, d_pos, d_vel, d_accel, d_charge, d_diameter, d_image, d_body, d_orientation, d_angmom, d_inertia, d_tag, d_rtag, d_pos_alt, d_vel_alt, d_accel_alt, d_charge_alt, d_diameter_alt, d_image_alt, d_body_alt, d_orientation_alt, d_angmom_alt, d_inertia_alt, d_tag_alt, d_out, d_comm_flags, d_comm_flags_out, d_tmp); } // return elements written to output stream return n_out; } __global__ void gpu_pdata_add_particles_kernel(unsigned int old_nparticles, unsigned int num_add_ptls, Scalar4 *d_pos, Scalar4 *d_vel, Scalar3 *d_accel, Scalar *d_charge, Scalar *d_diameter, int3 *d_image, unsigned int *d_body, 
Scalar4 *d_orientation, Scalar4 *d_angmom, Scalar3 *d_inertia, unsigned int *d_tag, unsigned int *d_rtag, const pdata_element *d_in, unsigned int *d_comm_flags) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= num_add_ptls) return; pdata_element p = d_in[idx]; unsigned int add_idx = old_nparticles + idx; d_pos[add_idx] = p.pos; d_vel[add_idx] = p.vel; d_accel[add_idx] = p.accel; d_charge[add_idx] = p.charge; d_diameter[add_idx] = p.diameter; d_image[add_idx] = p.image; d_body[add_idx] = p.body; d_orientation[add_idx] = p.orientation; d_angmom[add_idx] = p.angmom; d_inertia[add_idx] = p.inertia; d_tag[add_idx] = p.tag; d_rtag[p.tag] = add_idx; d_comm_flags[add_idx] = 0; } /*! \param old_nparticles old local particle count \param num_add_ptls Number of particles in input array \param d_pos Device array of particle positions \param d_vel Device iarray of particle velocities \param d_accel Device array of particle accelerations \param d_charge Device array of particle charges \param d_diameter Device array of particle diameters \param d_image Device array of particle images \param d_body Device array of particle body tags \param d_orientation Device array of particle orientations \param d_angmom Device array of particle angular momenta \param d_inertia Device array of particle moments of inertia \param d_tag Device array of particle tags \param d_rtag Device array for reverse-lookup table \param d_in Device array of packed input particle data \param d_comm_flags Device array of communication flags (pdata) */ void gpu_pdata_add_particles(const unsigned int old_nparticles, const unsigned int num_add_ptls, Scalar4 *d_pos, Scalar4 *d_vel, Scalar3 *d_accel, Scalar *d_charge, Scalar *d_diameter, int3 *d_image, unsigned int *d_body, Scalar4 *d_orientation, Scalar4 *d_angmom, Scalar3 *d_inertia, unsigned int *d_tag, unsigned int *d_rtag, const pdata_element *d_in, unsigned int *d_comm_flags) { unsigned int block_size = 512; unsigned int n_blocks = num_add_ptls/block_size + 1; gpu_pdata_add_particles_kernel<<<n_blocks, block_size>>>(old_nparticles, num_add_ptls, d_pos, d_vel, d_accel, d_charge, d_diameter, d_image, d_body, d_orientation, d_angmom, d_inertia, d_tag, d_rtag, d_in, d_comm_flags); } #endif // ENABLE_MPI
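gpu_pdata_remove in both versions above is a textbook mark / exclusive-scan / scatter stream compaction: gpu_select_sent_particles writes a 0/1 flag per particle, mgpu::Scan turns the flags into output offsets, and gpu_scatter_particle_data_kernel routes every particle either to the packed output buffer (scan_remove) or to the compacted local arrays (scan_keep), so all eleven per-particle arrays are repartitioned in one pass. A minimal sketch of the same idea on a plain int array, using Thrust (which the file already includes); compact_example, flags, kept and removed are illustrative names, not part of HOOMD:

#include <thrust/device_vector.h>
#include <thrust/scan.h>

// 1 in flags[i] means "remove element i", 0 means "keep it".
void compact_example(const thrust::device_vector<int> &flags,
                     const thrust::device_vector<int> &values,
                     thrust::device_vector<int> &kept,
                     thrust::device_vector<int> &removed)
{
    const int N = static_cast<int>(flags.size());
    thrust::device_vector<int> scan(N);

    // exclusive scan: scan[i] = number of flagged elements before position i
    thrust::exclusive_scan(flags.begin(), flags.end(), scan.begin());

    const int n_out = (N == 0) ? 0 : scan[N - 1] + flags[N - 1];
    removed.resize(n_out);
    kept.resize(N - n_out);

    // Each element lands at removed[scan[i]] or kept[i - scan[i]], exactly like
    // scan_remove / scan_keep in gpu_scatter_particle_data_kernel.  A host loop
    // is used here for clarity only; the real code does this in a device kernel.
    for (int i = 0; i < N; ++i)
    {
        if (flags[i]) removed[scan[i]] = values[i];
        else          kept[i - scan[i]] = values[i];
    }
}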
b6fe4d23a74dfd25ff922b2620060596ebfec25d.hip
// !!! This is a file automatically generated by hipify!!! /* For more information, please see: http://software.sci.utah.edu The MIT License Copyright (c) 2012-2013 Scientific Computing and Imaging Institute, University of Utah License for the specific language governing rights and limitations under Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define DEBUG 0 // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #include <sys/time.h> #include <teem/nrrd.h> // includes, GL #include "opengl_include.h" #include <float.h> #include <assert.h> // includes //#include "cutil.h" //#include "cutil_math.h" #include "helper_cuda.h" #include "helper_cuda_gl.h" #include "helper_functions.h" #include "helper_math.h" #include <hip/hip_runtime.h> #include "cuda_gl_interop.h" #include <cstdlib> #include "RenderParameters.h" #include "kernel_render.h" #include "kernel_filter.h" RenderParameters* dparams; hipArray* data_array = 0, *texture_array = 0, *color_array = 0; unsigned int last_width, last_height; unsigned int* d_out = 0; float4* d_inout = 0; float* d_rand_x = 0; void Host_CopyMemory(RenderParameters* params); void Host_Resize(RenderParameters* paramsp); __host__ int rgbToIntHost(float r, float g, float b) { r = clamp(r, 0.0f, 255.0f); g = clamp(g, 0.0f, 255.0f); b = clamp(b, 0.0f, 255.0f); return (int(b)<<16) | (int(g)<<8) | int(r); } __host__ float getRandom() { return drand48(); } extern "C" { void Host_Init(RenderParameters* paramsp) { hipMalloc((void**)&dparams, sizeof(RenderParameters)); hipMemcpy(dparams, paramsp, sizeof(RenderParameters),hipMemcpyHostToDevice); //dparams = (RenderParameters*)malloc(sizeof(RenderParameters)); //setup data texture tex_data.addressMode[0] = hipAddressModeClamp; tex_data.addressMode[1] = hipAddressModeClamp; tex_data.filterMode = hipFilterModeLinear; tex_data.normalized = false; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float4>(); hipExtent extent = make_hipExtent(paramsp->data_width, paramsp->data_height, paramsp->data_depth); checkCudaErrors( hipMalloc3DArray(&data_array, &channelDesc, extent) ); hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr((void*)paramsp->data, extent.width*sizeof(float4), extent.width, extent.height); copyParams.dstArray = data_array; copyParams.extent = extent; copyParams.kind = hipMemcpyHostToDevice; checkCudaErrors( hipMemcpy3D(&copyParams) ); hipBindTextureToArray(tex_data, data_array, channelDesc); /* //setup data texture tex_data2.addressMode[0] = hipAddressModeWrap; 
tex_data2.addressMode[1] = hipAddressModeWrap; tex_data2.filterMode = hipFilterModeLinear; tex_data2.normalized = false; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); hipExtent const extent = make_hipExtent(paramsp->data_width, paramsp->data_height, paramsp->data_depth); checkCudaErrors( hipMalloc3DArray(&data_array, &channelDesc, extent) ); hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr((void*)paramsp->data2, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = data_array; copyParams.extent = extent; copyParams.kind = hipMemcpyHostToDevice; checkCudaErrors( hipMemcpy3D(&copyParams) ); checkCudaErrors( hipBindTextureToArray(tex_data2, data_array, channelDesc) ); */ //setup cutoff texture if (paramsp->cutoff == CUTOFF_IMAGE) { tex_cutoff.addressMode[0] = hipAddressModeClamp; tex_cutoff.addressMode[1] = hipAddressModeClamp; tex_cutoff.filterMode = hipFilterModeLinear; tex_cutoff.normalized = true; hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>(); hipMallocArray(&texture_array, &channelDesc2, paramsp->cutoffSize.x, paramsp->cutoffSize.y); hipMemcpyToArray(texture_array, 0, 0, paramsp->cutoff_rgb, paramsp->cutoffSize.x*paramsp->cutoffSize.y*sizeof(float4), hipMemcpyHostToDevice); hipBindTextureToArray(tex_cutoff, texture_array, channelDesc2); paramsp->cutoff_dirty = false; } // //color testing code // /* tex_color.addressMode[0] = hipAddressModeWrap; tex_color.addressMode[1] = hipAddressModeWrap; tex_color.filterMode = hipFilterModeLinear; tex_color.normalized = false; hipChannelFormatDesc channelDesc3 = hipCreateChannelDesc<float4>(); extent = make_hipExtent(paramsp->data_width, paramsp->data_height, paramsp->data_depth); checkCudaErrors( hipMalloc3DArray(&color_array, &channelDesc3, extent) ); hipMemcpy3DParms copyParams3 = {0}; copyParams3.srcPtr = make_hipPitchedPtr((void*)paramsp->color_data, extent.width*sizeof(float4), extent.width, extent.height); copyParams3.dstArray = color_array; copyParams3.extent = extent; copyParams3.kind = hipMemcpyHostToDevice; checkCudaErrors( hipMemcpy3D(&copyParams3) ); hipBindTextureToArray(tex_color, color_array, channelDesc); */ Host_Resize(paramsp); } void Host_Render(RenderParameters* paramsp) { printf("rendering..."); if (last_width != paramsp->width || last_height != paramsp->height) Host_Resize(paramsp); //for(size_t i = 0; i < paramsp->width*paramsp->height; i++) // paramsp->inout_rgb[i] = make_float4(0,0,0,0); RenderParameters& params = *paramsp; Host_CopyMemory(paramsp); dim3 block(16,16,1); dim3 grid((params.width/block.x),(params.height/block.y),1); //positions go 0 to 100, which maps to -1 to 1 on each lightplace axis //float3 lookup_scale = {1.0f/(params.max_bound.x-params.min_bound.x), 1.0f/(params.max_bound.y - params.min_bound.y), 1.0f/(params.max_bound.z-params.min_bound.z)}; paramsp->rand1 = drand48(); paramsp->rand2 = drand48(); paramsp->rand3 = drand48(); paramsp->rand4 = drand48(); // allocate space on the device for the results hipMemcpy(dparams, paramsp, sizeof(RenderParameters),hipMemcpyHostToDevice); hipDeviceSynchronize(); for(int i =0; i < paramsp->numRenderPasses; i++) { hipLaunchKernelGGL(( kernel_render), dim3(grid), dim3(block), 0, 0, dparams, d_inout, d_out, d_rand_x); hipDeviceSynchronize(); paramsp->passes+=paramsp->raysPerPixel; } // hipMemcpy(paramsp->out_rgb, d_out, sizeof(unsigned int)*params.width*params.height, hipMemcpyDeviceToHost); // hipMemcpy(paramsp->inout_rgb, d_inout, sizeof(float4)*params.width*params.height, 
hipMemcpyDeviceToHost); //for(int i = 0 ; i < params.width*params.height; i++) // { // paramsp->out_rgb[i] = 0; //} hipDeviceSynchronize(); hipLaunchKernelGGL(( kernel_filter), dim3(grid), dim3(block), 0, 0, dparams, d_inout, d_out); hipMemcpy(paramsp->out_rgb, d_out, sizeof(unsigned int)*params.width*params.height, hipMemcpyDeviceToHost); hipDeviceSynchronize(); /* float m = 255.0/paramsp->passes; for(int i = 0; i < paramsp->width; i++) { for(int j = 0; j < paramsp->height; j++) { size_t index = i + j*paramsp->width; float4 c = paramsp->inout_rgb[index]; //c = make_float4(1.0,1.0,1.0,1.0); paramsp->out_rgb[index] = rgbToIntHost(c.x*m,c.y*m,c.z*m); } }*/ for(int i = 0; i < paramsp->width; i++) { for(int j = 0; j < paramsp->height; j++) { size_t index = i + j*paramsp->width; unsigned int c = paramsp->out_rgb[index]; unsigned char* ca = (unsigned char*)&c; ca[3] = 255; paramsp->out_rgb[index] = c; } } glDrawPixels(params.width, params.height, GL_RGBA, GL_UNSIGNED_BYTE, paramsp->out_rgb); printf(" rendering finished.\n"); } void Host_Clear(RenderParameters* paramsp) { if (!d_inout) return; hipMemcpy(d_inout, paramsp->inout_rgb, sizeof(float4)*paramsp->width*paramsp->height, hipMemcpyHostToDevice); } void Host_Kill() { RenderParameters* dparams; hipArray* data_array = 0, *texture_array = 0, *color_array = 0; unsigned int last_width, last_height; unsigned int* d_out = 0; float4* d_inout = 0; float* d_rand_x = 0; hipFree(d_inout); hipFree(dparams); hipFree(d_out); hipFree(d_rand_x); hipUnbindTexture (tex_data); checkCudaErrors (hipFreeArray (data_array)); hipUnbindTexture (tex_data2); checkCudaErrors (hipFreeArray (texture_array)); hipUnbindTexture (tex_cutoff); checkCudaErrors (hipFreeArray (color_array)); } } void Host_CopyMemory(RenderParameters* paramsp) { //TODO: NOTE: for debugging perposes only memcopy, however need to support size changes if (paramsp->cutoff_dirty) { //if (texture_array) // hipFree(texture_array); hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>(); //hipMallocArray(&texture_array, &channelDesc2, paramsp->cutoffSize.x, paramsp->cutoffSize.y); hipMemcpyToArray(texture_array, 0, 0, paramsp->cutoff_rgb, paramsp->cutoffSize.x*paramsp->cutoffSize.y*sizeof(float4), hipMemcpyHostToDevice); paramsp->cutoff_dirty = false; } } void Host_Resize(RenderParameters* paramsp) { printf("resizing to %d %d \n", paramsp->width, paramsp->height); paramsp->passes = 0; int window_size = paramsp->width*paramsp->height; if (d_inout) hipFree(d_inout); hipMalloc((void**)&d_inout, sizeof(float4)*paramsp->width*paramsp->height); if (paramsp->inout_rgb) delete [] paramsp->inout_rgb; paramsp->inout_rgb = new float4[paramsp->width*paramsp->height]; for(size_t i = 0; i < paramsp->width*paramsp->height; i++) paramsp->inout_rgb[i] = make_float4(0,0,0,0); hipMemcpy(d_inout, paramsp->inout_rgb, sizeof(float4)*paramsp->width*paramsp->height, hipMemcpyHostToDevice); if (d_out) hipFree(d_out); hipMalloc((void**)&d_out, sizeof(unsigned int)*paramsp->width*paramsp->height); last_width = paramsp->width; last_height = paramsp->height; if (paramsp->out_rgb) free(paramsp->out_rgb); paramsp->out_rgb = (unsigned int*)malloc(sizeof(unsigned int)*paramsp->width*paramsp->height); if (d_rand_x) hipFree(d_rand_x); paramsp->random_array = (float*)malloc(sizeof(float)*window_size); for(int i =0;i<window_size;i++){ paramsp->random_array[i] = getRandom(); } hipMalloc((void**)&d_rand_x, window_size*sizeof(float)); hipMemcpy( d_rand_x, paramsp->random_array,sizeof(float)*window_size, hipMemcpyHostToDevice ); }
b6fe4d23a74dfd25ff922b2620060596ebfec25d.cu
/* For more information, please see: http://software.sci.utah.edu The MIT License Copyright (c) 2012-2013 Scientific Computing and Imaging Institute, University of Utah License for the specific language governing rights and limitations under Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define DEBUG 0 // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #include <sys/time.h> #include <teem/nrrd.h> // includes, GL #include "opengl_include.h" #include <float.h> #include <assert.h> // includes //#include "cutil.h" //#include "cutil_math.h" #include "helper_cuda.h" #include "helper_cuda_gl.h" #include "helper_functions.h" #include "helper_math.h" #include <cuda.h> #include "cuda_gl_interop.h" #include <cstdlib> #include "RenderParameters.h" #include "kernel_render.h" #include "kernel_filter.h" RenderParameters* dparams; cudaArray* data_array = 0, *texture_array = 0, *color_array = 0; unsigned int last_width, last_height; unsigned int* d_out = 0; float4* d_inout = 0; float* d_rand_x = 0; void Host_CopyMemory(RenderParameters* params); void Host_Resize(RenderParameters* paramsp); __host__ int rgbToIntHost(float r, float g, float b) { r = clamp(r, 0.0f, 255.0f); g = clamp(g, 0.0f, 255.0f); b = clamp(b, 0.0f, 255.0f); return (int(b)<<16) | (int(g)<<8) | int(r); } __host__ float getRandom() { return drand48(); } extern "C" { void Host_Init(RenderParameters* paramsp) { cudaMalloc((void**)&dparams, sizeof(RenderParameters)); cudaMemcpy(dparams, paramsp, sizeof(RenderParameters),cudaMemcpyHostToDevice); //dparams = (RenderParameters*)malloc(sizeof(RenderParameters)); //setup data texture tex_data.addressMode[0] = cudaAddressModeClamp; tex_data.addressMode[1] = cudaAddressModeClamp; tex_data.filterMode = cudaFilterModeLinear; tex_data.normalized = false; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float4>(); cudaExtent extent = make_cudaExtent(paramsp->data_width, paramsp->data_height, paramsp->data_depth); checkCudaErrors( cudaMalloc3DArray(&data_array, &channelDesc, extent) ); cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr((void*)paramsp->data, extent.width*sizeof(float4), extent.width, extent.height); copyParams.dstArray = data_array; copyParams.extent = extent; copyParams.kind = cudaMemcpyHostToDevice; checkCudaErrors( cudaMemcpy3D(&copyParams) ); cudaBindTextureToArray(tex_data, data_array, channelDesc); /* //setup data texture tex_data2.addressMode[0] = cudaAddressModeWrap; tex_data2.addressMode[1] = cudaAddressModeWrap; 
tex_data2.filterMode = cudaFilterModeLinear; tex_data2.normalized = false; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); cudaExtent const extent = make_cudaExtent(paramsp->data_width, paramsp->data_height, paramsp->data_depth); checkCudaErrors( cudaMalloc3DArray(&data_array, &channelDesc, extent) ); cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr((void*)paramsp->data2, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = data_array; copyParams.extent = extent; copyParams.kind = cudaMemcpyHostToDevice; checkCudaErrors( cudaMemcpy3D(&copyParams) ); checkCudaErrors( cudaBindTextureToArray(tex_data2, data_array, channelDesc) ); */ //setup cutoff texture if (paramsp->cutoff == CUTOFF_IMAGE) { tex_cutoff.addressMode[0] = cudaAddressModeClamp; tex_cutoff.addressMode[1] = cudaAddressModeClamp; tex_cutoff.filterMode = cudaFilterModeLinear; tex_cutoff.normalized = true; cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>(); cudaMallocArray(&texture_array, &channelDesc2, paramsp->cutoffSize.x, paramsp->cutoffSize.y); cudaMemcpyToArray(texture_array, 0, 0, paramsp->cutoff_rgb, paramsp->cutoffSize.x*paramsp->cutoffSize.y*sizeof(float4), cudaMemcpyHostToDevice); cudaBindTextureToArray(tex_cutoff, texture_array, channelDesc2); paramsp->cutoff_dirty = false; } // //color testing code // /* tex_color.addressMode[0] = cudaAddressModeWrap; tex_color.addressMode[1] = cudaAddressModeWrap; tex_color.filterMode = cudaFilterModeLinear; tex_color.normalized = false; cudaChannelFormatDesc channelDesc3 = cudaCreateChannelDesc<float4>(); extent = make_cudaExtent(paramsp->data_width, paramsp->data_height, paramsp->data_depth); checkCudaErrors( cudaMalloc3DArray(&color_array, &channelDesc3, extent) ); cudaMemcpy3DParms copyParams3 = {0}; copyParams3.srcPtr = make_cudaPitchedPtr((void*)paramsp->color_data, extent.width*sizeof(float4), extent.width, extent.height); copyParams3.dstArray = color_array; copyParams3.extent = extent; copyParams3.kind = cudaMemcpyHostToDevice; checkCudaErrors( cudaMemcpy3D(&copyParams3) ); cudaBindTextureToArray(tex_color, color_array, channelDesc); */ Host_Resize(paramsp); } void Host_Render(RenderParameters* paramsp) { printf("rendering..."); if (last_width != paramsp->width || last_height != paramsp->height) Host_Resize(paramsp); //for(size_t i = 0; i < paramsp->width*paramsp->height; i++) // paramsp->inout_rgb[i] = make_float4(0,0,0,0); RenderParameters& params = *paramsp; Host_CopyMemory(paramsp); dim3 block(16,16,1); dim3 grid((params.width/block.x),(params.height/block.y),1); //positions go 0 to 100, which maps to -1 to 1 on each lightplace axis //float3 lookup_scale = {1.0f/(params.max_bound.x-params.min_bound.x), 1.0f/(params.max_bound.y - params.min_bound.y), 1.0f/(params.max_bound.z-params.min_bound.z)}; paramsp->rand1 = drand48(); paramsp->rand2 = drand48(); paramsp->rand3 = drand48(); paramsp->rand4 = drand48(); // allocate space on the device for the results cudaMemcpy(dparams, paramsp, sizeof(RenderParameters),cudaMemcpyHostToDevice); cudaThreadSynchronize(); for(int i =0; i < paramsp->numRenderPasses; i++) { kernel_render<<< grid, block>>>(dparams, d_inout, d_out, d_rand_x); cudaThreadSynchronize(); paramsp->passes+=paramsp->raysPerPixel; } // cudaMemcpy(paramsp->out_rgb, d_out, sizeof(unsigned int)*params.width*params.height, cudaMemcpyDeviceToHost); // cudaMemcpy(paramsp->inout_rgb, d_inout, sizeof(float4)*params.width*params.height, cudaMemcpyDeviceToHost); //for(int i = 0 ; i < 
params.width*params.height; i++) // { // paramsp->out_rgb[i] = 0; //} cudaThreadSynchronize(); kernel_filter<<< grid, block>>>(dparams, d_inout, d_out); cudaMemcpy(paramsp->out_rgb, d_out, sizeof(unsigned int)*params.width*params.height, cudaMemcpyDeviceToHost); cudaThreadSynchronize(); /* float m = 255.0/paramsp->passes; for(int i = 0; i < paramsp->width; i++) { for(int j = 0; j < paramsp->height; j++) { size_t index = i + j*paramsp->width; float4 c = paramsp->inout_rgb[index]; //c = make_float4(1.0,1.0,1.0,1.0); paramsp->out_rgb[index] = rgbToIntHost(c.x*m,c.y*m,c.z*m); } }*/ for(int i = 0; i < paramsp->width; i++) { for(int j = 0; j < paramsp->height; j++) { size_t index = i + j*paramsp->width; unsigned int c = paramsp->out_rgb[index]; unsigned char* ca = (unsigned char*)&c; ca[3] = 255; paramsp->out_rgb[index] = c; } } glDrawPixels(params.width, params.height, GL_RGBA, GL_UNSIGNED_BYTE, paramsp->out_rgb); printf(" rendering finished.\n"); } void Host_Clear(RenderParameters* paramsp) { if (!d_inout) return; cudaMemcpy(d_inout, paramsp->inout_rgb, sizeof(float4)*paramsp->width*paramsp->height, cudaMemcpyHostToDevice); } void Host_Kill() { RenderParameters* dparams; cudaArray* data_array = 0, *texture_array = 0, *color_array = 0; unsigned int last_width, last_height; unsigned int* d_out = 0; float4* d_inout = 0; float* d_rand_x = 0; cudaFree(d_inout); cudaFree(dparams); cudaFree(d_out); cudaFree(d_rand_x); cudaUnbindTexture (tex_data); checkCudaErrors (cudaFreeArray (data_array)); cudaUnbindTexture (tex_data2); checkCudaErrors (cudaFreeArray (texture_array)); cudaUnbindTexture (tex_cutoff); checkCudaErrors (cudaFreeArray (color_array)); } } void Host_CopyMemory(RenderParameters* paramsp) { //TODO: NOTE: for debugging perposes only memcopy, however need to support size changes if (paramsp->cutoff_dirty) { //if (texture_array) // cudaFree(texture_array); cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>(); //cudaMallocArray(&texture_array, &channelDesc2, paramsp->cutoffSize.x, paramsp->cutoffSize.y); cudaMemcpyToArray(texture_array, 0, 0, paramsp->cutoff_rgb, paramsp->cutoffSize.x*paramsp->cutoffSize.y*sizeof(float4), cudaMemcpyHostToDevice); paramsp->cutoff_dirty = false; } } void Host_Resize(RenderParameters* paramsp) { printf("resizing to %d %d \n", paramsp->width, paramsp->height); paramsp->passes = 0; int window_size = paramsp->width*paramsp->height; if (d_inout) cudaFree(d_inout); cudaMalloc((void**)&d_inout, sizeof(float4)*paramsp->width*paramsp->height); if (paramsp->inout_rgb) delete [] paramsp->inout_rgb; paramsp->inout_rgb = new float4[paramsp->width*paramsp->height]; for(size_t i = 0; i < paramsp->width*paramsp->height; i++) paramsp->inout_rgb[i] = make_float4(0,0,0,0); cudaMemcpy(d_inout, paramsp->inout_rgb, sizeof(float4)*paramsp->width*paramsp->height, cudaMemcpyHostToDevice); if (d_out) cudaFree(d_out); cudaMalloc((void**)&d_out, sizeof(unsigned int)*paramsp->width*paramsp->height); last_width = paramsp->width; last_height = paramsp->height; if (paramsp->out_rgb) free(paramsp->out_rgb); paramsp->out_rgb = (unsigned int*)malloc(sizeof(unsigned int)*paramsp->width*paramsp->height); if (d_rand_x) cudaFree(d_rand_x); paramsp->random_array = (float*)malloc(sizeof(float)*window_size); for(int i =0;i<window_size;i++){ paramsp->random_array[i] = getRandom(); } cudaMalloc((void**)&d_rand_x, window_size*sizeof(float)); cudaMemcpy( d_rand_x, paramsp->random_array,sizeof(float)*window_size, cudaMemcpyHostToDevice ); }
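Host_Render above accumulates samples into the float4 buffer d_inout across paramsp->passes passes, lets kernel_filter build the packed 8-bit display buffer, and then forces every alpha byte to 255 before handing the buffer to glDrawPixels. The commented-out host loop (with m = 255.0/passes) shows the intended average-and-pack step; a hedged host-side sketch of that conversion follows, matching the byte layout of rgbToIntHost and assuming a little-endian host (kernel_filter itself is not shown in this file, so this illustrates the conversion rather than transcribing the kernel):

#include <cuda_runtime.h>   // for float4 (vector_types.h); the HIP build gets it from <hip/hip_runtime.h>
#include <cstdint>

static inline uint32_t packRGBA(float r, float g, float b)
{
    auto clamp255 = [](float v) { return v < 0.0f ? 0.0f : (v > 255.0f ? 255.0f : v); };
    r = clamp255(r); g = clamp255(g); b = clamp255(b);
    // Same layout as rgbToIntHost, with alpha forced to 255 as Host_Render does:
    // byte 0 = R, byte 1 = G, byte 2 = B, byte 3 = A, i.e. what GL_RGBA /
    // GL_UNSIGNED_BYTE expects on a little-endian host.
    return (255u << 24) | (uint32_t(b) << 16) | (uint32_t(g) << 8) | uint32_t(r);
}

void accumulationToPixels(const float4 *accum, uint32_t *out,
                          int width, int height, int passes)
{
    const float m = 255.0f / float(passes);   // average the running sum, then map to [0, 255]
    for (int i = 0; i < width * height; ++i)
        out[i] = packRGBA(accum[i].x * m, accum[i].y * m, accum[i].z * m);
}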
9319030e956e4c90af51831fc79bd9baae55d1cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include "Mandelbrot.h" #include "Device.h" #include "MathTools.h" using cpu::IntervalI; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void mandelbrot(uchar4* ptrDevPixels, int w, int h, DomaineMath domaineMath, int n); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*-------------------------*\ |* Constructeur *| \*-------------------------*/ Mandelbrot::Mandelbrot(int w, int h, int nMin, int nMax) : variateurN(IntervalI(nMin, nMax), 1) { // Inputs this->w = w; this->h = h; this->ptrDomaineMathInit = new DomaineMath(-2.1, -1.3, 0.8, 1.3); // Mandelbrot // this->ptrDomaineMathInit = new DomaineMath(-1.3, -1.4, 1.4, 1.3); // Julia // Tools this->dg = dim3(8, 8, 1); // disons a optimiser this->db = dim3(16, 16, 1); // disons a optimiser //Outputs this->title = "Mandelbrot_CUDA (Zoomable)"; // Check: //print(dg, db); Device::assertDim(dg, db); } Mandelbrot::~Mandelbrot() { delete ptrDomaineMathInit; } /*-------------------------*\ |* Methode *| \*-------------------------*/ /** * Override * Call periodicly by the API */ void Mandelbrot::process(uchar4* ptrDevPixels, int w, int h, const DomaineMath& domaineMath) {hipLaunchKernelGGL(( mandelbrot), dim3(dg),dim3(db), 0, 0, ptrDevPixels,w,h,domaineMath, n); } /** * Override * Call periodicly by the API */ void Mandelbrot::animationStep() { this->n = variateurN.varierAndGet(); // in [0,2pi] } /*--------------*\ |* get *| \*--------------*/ /** * Override */ DomaineMath* Mandelbrot::getDomaineMathInit(void) { return ptrDomaineMathInit; } /** * Override */ float Mandelbrot::getAnimationPara(void) { return n; } /** * Override */ int Mandelbrot::getW(void) { return w; } /** * Override */ int Mandelbrot::getH(void) { return h; } /** * Override */ string Mandelbrot::getTitle(void) { return title; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
9319030e956e4c90af51831fc79bd9baae55d1cb.cu
#include <assert.h> #include "Mandelbrot.h" #include "Device.h" #include "MathTools.h" using cpu::IntervalI; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void mandelbrot(uchar4* ptrDevPixels, int w, int h, DomaineMath domaineMath, int n); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*-------------------------*\ |* Constructeur *| \*-------------------------*/ Mandelbrot::Mandelbrot(int w, int h, int nMin, int nMax) : variateurN(IntervalI(nMin, nMax), 1) { // Inputs this->w = w; this->h = h; this->ptrDomaineMathInit = new DomaineMath(-2.1, -1.3, 0.8, 1.3); // Mandelbrot // this->ptrDomaineMathInit = new DomaineMath(-1.3, -1.4, 1.4, 1.3); // Julia // Tools this->dg = dim3(8, 8, 1); // disons a optimiser this->db = dim3(16, 16, 1); // disons a optimiser //Outputs this->title = "Mandelbrot_CUDA (Zoomable)"; // Check: //print(dg, db); Device::assertDim(dg, db); } Mandelbrot::~Mandelbrot() { delete ptrDomaineMathInit; } /*-------------------------*\ |* Methode *| \*-------------------------*/ /** * Override * Call periodicly by the API */ void Mandelbrot::process(uchar4* ptrDevPixels, int w, int h, const DomaineMath& domaineMath) { mandelbrot<<<dg,db>>>(ptrDevPixels,w,h,domaineMath, n); } /** * Override * Call periodicly by the API */ void Mandelbrot::animationStep() { this->n = variateurN.varierAndGet(); // in [0,2pi] } /*--------------*\ |* get *| \*--------------*/ /** * Override */ DomaineMath* Mandelbrot::getDomaineMathInit(void) { return ptrDomaineMathInit; } /** * Override */ float Mandelbrot::getAnimationPara(void) { return n; } /** * Override */ int Mandelbrot::getW(void) { return w; } /** * Override */ int Mandelbrot::getH(void) { return h; } /** * Override */ string Mandelbrot::getTitle(void) { return title; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
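Mandelbrot's constructor above pins dg = (8, 8) and db = (16, 16) -- the French comment "disons a optimiser" roughly means "say, still to be optimized" -- so a 128x128 thread grid is launched regardless of the image size, and the kernel presumably covers the remaining pixels with a grid-stride loop. A common alternative, sketched here under the assumption of one pixel per thread (not how this particular kernel is written), is to derive the grid from the image size with a ceiling division:

#include <cuda_runtime.h>   // dim3; the HIP build gets it from <hip/hip_runtime.h>

inline dim3 coveringGrid(int w, int h, dim3 block)
{
    // Ceiling division so the grid covers the whole w x h image.
    return dim3((w + block.x - 1) / block.x,
                (h + block.y - 1) / block.y,
                1);
}

// Hypothetical usage with the launch from Mandelbrot::process above (CUDA form):
//   dim3 db(16, 16, 1);
//   dim3 dg = coveringGrid(w, h, db);
//   mandelbrot<<<dg, db>>>(ptrDevPixels, w, h, domaineMath, n);
//   // threads with x >= w or y >= h must early-return inside the kernel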
394b6e1ee63cac4ac53e19c71e3c7859442eed12.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gpu_seqwr_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *buffer = NULL; hipMalloc(&buffer, XSIZE*YSIZE); size_t reps = 1; size_t elements = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gpu_seqwr_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, buffer,reps,elements); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gpu_seqwr_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, buffer,reps,elements); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gpu_seqwr_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, buffer,reps,elements); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
394b6e1ee63cac4ac53e19c71e3c7859442eed12.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gpu_seqwr_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *buffer = NULL; cudaMalloc(&buffer, XSIZE*YSIZE); size_t reps = 1; size_t elements = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gpu_seqwr_kernel<<<gridBlock,threadBlock>>>(buffer,reps,elements); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gpu_seqwr_kernel<<<gridBlock,threadBlock>>>(buffer,reps,elements); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gpu_seqwr_kernel<<<gridBlock,threadBlock>>>(buffer,reps,elements); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
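The harness above warms up with 10 launches, then wraps 1000 launches of gpu_seqwr_kernel in std::chrono::steady_clock timestamps. Note that no device synchronization happens between the timed loop and the second timestamp, so once the launches become asynchronous the reported microseconds mostly reflect enqueue overhead (plus any back-pressure from a full launch queue) rather than kernel execution time. A sketch of the same measurement with CUDA events, which are recorded in-stream and therefore bracket the GPU work itself; touch is a stand-in kernel, since gpu_seqwr_kernel's definition lives in the included gpu_seqwr_kernel.cu and its exact signature is not shown here:

#include <cuda_runtime.h>

__global__ void touch(int *buf, size_t n)   // stand-in for the benchmarked kernel
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) buf[i] = static_cast<int>(i);
}

// Returns the GPU time for `iterations` back-to-back launches, in microseconds.
float timeLaunchesUs(dim3 grid, dim3 block, int *d_buf, size_t n, int iterations)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);                     // recorded in the default stream
    for (int i = 0; i < iterations; ++i)
        touch<<<grid, block>>>(d_buf, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                 // wait until the last launch has finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);     // elapsed time between the two events, in ms
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms * 1000.0f;                        // microseconds, to match the harness output
}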
c25eede8bdc6db4d36404ac6541086bb83b8ee4a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/gpu_math_functions.cuh" namespace caffe { template<typename T, typename TR> __global__ void convert_kernel(const unsigned int n, const T* in, TR* out) { CUDA_KERNEL_LOOP(i, n) { out[i] = in[i]; } } template<> __global__ void convert_kernel(const unsigned int n, const half2* in, float2* out) { CUDA_KERNEL_LOOP(i, n) { out[i] = __half22float2(in[i]); } } template<> __global__ void convert_kernel(const unsigned int n, const float2* in, half2* out) { CUDA_KERNEL_LOOP(i, n) { out[i] = float22half2_clip(in[i]); } } template<typename T, typename TR> void caffe_gpu_convert(const unsigned int N, const T* in, TR* out) { hipStream_t stream = Caffe::thread_stream(); //LOG(INFO) << "caffe_gpu_convert stream = " << stream; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( convert_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, in, out); CUDA_POST_KERNEL_CHECK; CUDA_CHECK(hipStreamSynchronize(stream)); } template<> void caffe_gpu_convert<float, float16>(const unsigned int n, const float* in, float16* out) { hipStream_t stream = Caffe::thread_stream(); //LOG(INFO) << "caffe_gpu_convert stream = " << stream; const unsigned int n2 = even(n) / 2; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( convert_kernel), dim3(CAFFE_GET_BLOCKS_HALF(n2)), dim3(CAFFE_CUDA_NUM_THREADS_HALF), 0, stream, n2, reinterpret_cast<const float2*>(in), reinterpret_cast<half2*>(out)); CUDA_POST_KERNEL_CHECK; CUDA_CHECK(hipStreamSynchronize(stream)); } template<> void caffe_gpu_convert<float16, float>(const unsigned int n, const float16* in, float* out) { hipStream_t stream = Caffe::thread_stream(); const unsigned int n2 = even(n) / 2; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( convert_kernel), dim3(CAFFE_GET_BLOCKS_HALF(n2)), dim3(CAFFE_CUDA_NUM_THREADS_HALF), 0, stream, n2, reinterpret_cast<const half2*>(in), reinterpret_cast<float2*>(out)); CUDA_POST_KERNEL_CHECK; CUDA_CHECK(hipStreamSynchronize(stream)); } template void caffe_gpu_convert<double, float16>(const unsigned int n, const double* in, float16* out); template void caffe_gpu_convert<float16, double>(const unsigned int n, const float16* in, double* out); template void caffe_gpu_convert<double, float>(const unsigned int n, const double* in, float* out); template void caffe_gpu_convert<float, double>(const unsigned int n, const float* in, double* out); template<> void caffe_gpu_convert<float, float>(const unsigned int n, const float* in, float* out) { caffe_copy(n, in, out); } template<> void caffe_gpu_convert<double, double>(const unsigned int n, const double* in, double* out) { caffe_copy(n, in, out); } #ifndef CPU_ONLY template<> void caffe_gpu_convert<float16, float16>(const unsigned int n, const float16* in, float16* out) { caffe_copy(n, in, out); } #endif template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? 
N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float* X, hipStream_t str) { hipStream_t initial_stream; CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double* X, hipStream_t str) { hipStream_t initial_stream; CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void 
caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template<> __global__ void add_kernel<half2>(const int n, const half2* a, const half2* b, half2* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = hadd2(a[index], b[index]); } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, 
y); } template<> void caffe_gpu_add<float16>(const int N, const float16* a, const float16* b, float16* y) { hipStream_t stream = Caffe::thread_stream(); const unsigned int n2 = even(N) / 2; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel), dim3(CAFFE_GET_BLOCKS_HALF(n2)), dim3(CAFFE_CUDA_NUM_THREADS_HALF), 0, stream, n2, reinterpret_cast<const half2*>(a), reinterpret_cast<const half2*>(b), reinterpret_cast<half2*>(y)); CUDA_POST_KERNEL_CHECK; CUDA_CHECK(hipStreamSynchronize(stream)); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<float>), 
dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <typename Dtype> __global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = sqrt(a[index]); } } template <> void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const 
double sigma, double* r) { CURAND_CHECK( hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe
c25eede8bdc6db4d36404ac6541086bb83b8ee4a.cu
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/gpu_math_functions.cuh" namespace caffe { template<typename T, typename TR> __global__ void convert_kernel(const unsigned int n, const T* in, TR* out) { CUDA_KERNEL_LOOP(i, n) { out[i] = in[i]; } } template<> __global__ void convert_kernel(const unsigned int n, const half2* in, float2* out) { CUDA_KERNEL_LOOP(i, n) { out[i] = __half22float2(in[i]); } } template<> __global__ void convert_kernel(const unsigned int n, const float2* in, half2* out) { CUDA_KERNEL_LOOP(i, n) { out[i] = float22half2_clip(in[i]); } } template<typename T, typename TR> void caffe_gpu_convert(const unsigned int N, const T* in, TR* out) { cudaStream_t stream = Caffe::thread_stream(); //LOG(INFO) << "caffe_gpu_convert stream = " << stream; // NOLINT_NEXT_LINE(whitespace/operators) convert_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, in, out); CUDA_POST_KERNEL_CHECK; CUDA_CHECK(cudaStreamSynchronize(stream)); } template<> void caffe_gpu_convert<float, float16>(const unsigned int n, const float* in, float16* out) { cudaStream_t stream = Caffe::thread_stream(); //LOG(INFO) << "caffe_gpu_convert stream = " << stream; const unsigned int n2 = even(n) / 2; // NOLINT_NEXT_LINE(whitespace/operators) convert_kernel<<<CAFFE_GET_BLOCKS_HALF(n2), CAFFE_CUDA_NUM_THREADS_HALF, 0, stream>>> (n2, reinterpret_cast<const float2*>(in), reinterpret_cast<half2*>(out)); CUDA_POST_KERNEL_CHECK; CUDA_CHECK(cudaStreamSynchronize(stream)); } template<> void caffe_gpu_convert<float16, float>(const unsigned int n, const float16* in, float* out) { cudaStream_t stream = Caffe::thread_stream(); const unsigned int n2 = even(n) / 2; // NOLINT_NEXT_LINE(whitespace/operators) convert_kernel<<<CAFFE_GET_BLOCKS_HALF(n2), CAFFE_CUDA_NUM_THREADS_HALF, 0, stream>>> (n2, reinterpret_cast<const half2*>(in), reinterpret_cast<float2*>(out)); CUDA_POST_KERNEL_CHECK; CUDA_CHECK(cudaStreamSynchronize(stream)); } template void caffe_gpu_convert<double, float16>(const unsigned int n, const double* in, float16* out); template void caffe_gpu_convert<float16, double>(const unsigned int n, const float16* in, double* out); template void caffe_gpu_convert<double, float>(const unsigned int n, const double* in, float* out); template void caffe_gpu_convert<float, double>(const unsigned int n, const float* in, double* out); template<> void caffe_gpu_convert<float, float>(const unsigned int n, const float* in, float* out) { caffe_copy(n, in, out); } template<> void caffe_gpu_convert<double, double>(const unsigned int n, const double* in, double* out) { caffe_copy(n, in, out); } #ifndef CPU_ONLY template<> void caffe_gpu_convert<float16, float16>(const unsigned int n, const float16* in, float16* out) { caffe_copy(n, in, out); } #endif template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float* X, cudaStream_t str) { cudaStream_t initial_stream; CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double* X, cudaStream_t str) { cudaStream_t initial_stream; CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> 
void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template<> __global__ void add_kernel<half2>(const int n, const half2* a, const half2* b, half2* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = hadd2(a[index], b[index]); } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template<> void caffe_gpu_add<float16>(const int N, const float16* a, const float16* b, float16* y) { cudaStream_t stream = Caffe::thread_stream(); const unsigned int n2 = even(N) / 2; // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<<<CAFFE_GET_BLOCKS_HALF(n2), CAFFE_CUDA_NUM_THREADS_HALF, 0, stream>>> (n2, reinterpret_cast<const half2*>(a), reinterpret_cast<const 
half2*>(b), reinterpret_cast<half2*>(y)); CUDA_POST_KERNEL_CHECK; CUDA_CHECK(cudaStreamSynchronize(stream)); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // 
NOLINT_NEXT_LINE(whitespace/operators) log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <typename Dtype> __global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = sqrt(a[index]); } } template <> void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe
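Beyond the runtime API, this pair shows the library-level renames hipify applies inside Caffe's math functions: cublas* calls become hipblas* with CUBLAS_OP_N/CUBLAS_OP_T turning into HIPBLAS_OP_N/HIPBLAS_OP_T, curandGenerate* becomes hiprandGenerate*, and cudaStream_t becomes hipStream_t, while Caffe's own wrapper macros (CUBLAS_CHECK, CURAND_CHECK, CUDA_KERNEL_LOOP, CUDA_POST_KERNEL_CHECK) keep their CUDA-era names. A minimal sketch of the axpy call pattern is below; CHECK_CUBLAS and gpu_axpy are illustrative stand-ins, not Caffe symbols.

// Sketch of the cuBLAS call pattern used by caffe_gpu_axpy above; CHECK_CUBLAS and
// gpu_axpy are illustrative stand-ins, not Caffe's CUBLAS_CHECK / caffe_gpu_axpy.
#include <cstdio>
#include <cublas_v2.h>      // hipify typically maps this header to hipblas.h
#include <cuda_runtime.h>

#define CHECK_CUBLAS(x)                                           \
  do {                                                            \
    cublasStatus_t s_ = (x);                                      \
    if (s_ != CUBLAS_STATUS_SUCCESS)                              \
      printf("cublas error %d at line %d\n", (int)s_, __LINE__);  \
  } while (0)

void gpu_axpy(cublasHandle_t handle, int n, float alpha, const float *x, float *y) {
  // cublasSaxpy -> hipblasSaxpy under hipify; the argument order is unchanged.
  CHECK_CUBLAS(cublasSaxpy(handle, n, &alpha, x, 1, y, 1));
}

int main() {
  cublasHandle_t handle;                  // -> hipblasHandle_t
  CHECK_CUBLAS(cublasCreate(&handle));    // -> hipblasCreate
  float *x = nullptr, *y = nullptr;
  cudaMalloc(&x, 4 * sizeof(float));
  cudaMalloc(&y, 4 * sizeof(float));
  cudaMemset(x, 0, 4 * sizeof(float));
  cudaMemset(y, 0, 4 * sizeof(float));
  gpu_axpy(handle, 4, 2.0f, x, y);
  cublasDestroy(handle);
  cudaFree(x);
  cudaFree(y);
  return 0;
}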
3e2f81bb684ad46d47d7a89f14968e330421ba59.hip
// !!! This is a file automatically generated by hipify!!! #include "hip_util.cuh" /* NVIDIA GPU COMPATIBILITY */ void getGrid(unsigned long numElements, dim3 &grid, int device){ hipDeviceProp_t prop; hipGetDeviceProperties(&prop, device); grid = {(unsigned int)prop.maxGridSize[0],(unsigned int)prop.maxGridSize[1],(unsigned int)prop.maxGridSize[2]}; if(numElements < grid.x){ grid.x = numElements; grid.y = 1; grid.z = 1; } else{ grid.x = 65536; if(numElements < grid.x*grid.y){ grid.y = numElements/grid.x; grid.y++; grid.z = 1; } else if(numElements < grid.x*grid.y*grid.z){ grid.z = numElements/(grid.x*grid.y); grid.z++; } } } void checkDims(dim3 grid, dim3 block, int device){ hipDeviceProp_t prop; hipGetDeviceProperties(&prop, device); bool goodDims = true; if(grid.x > prop.maxGridSize[0]){ goodDims = false; } else if(grid.y > prop.maxGridSize[1]){ goodDims = false; } else if(grid.z > prop.maxGridSize[2]){ goodDims = false; } else if(block.x > prop.maxThreadsDim[0]){ goodDims = false; } else if(block.y > prop.maxThreadsDim[1]){ goodDims = false; } else if(block.z > prop.maxThreadsDim[2]){ goodDims = false; } else if(block.x*block.y*block.z > prop.maxThreadsPerBlock){ goodDims = false; } if(!goodDims){ std::cerr<<"ERROR: grid or block dims are invalid for given device"<<std::endl; exit(-1); //TODO replace with exception and make more specific //maybe make macro like CudaSafeCall() } } __host__ void cusolverCheckError(cusolverStatus_t cusolver_status){ switch (cusolver_status){ case CUSOLVER_STATUS_SUCCESS: std::cout<<"CUSOLVER_SUCCESS"<<std::endl; break; case CUSOLVER_STATUS_NOT_INITIALIZED: std::cout<<"CUSOLVER_STATUS_NOT_INITIALIZED"<<std::endl; exit(-1); case CUSOLVER_STATUS_ALLOC_FAILED: std::cout<<"CUSOLVER_STATUS_ALLOC_FAILED"<<std::endl; exit(-1); case CUSOLVER_STATUS_INVALID_VALUE: std::cout<<"CUSOLVER_STATUS_INVALID_VALUE"<<std::endl; exit(-1); case CUSOLVER_STATUS_ARCH_MISMATCH: std::cout<<"CUSOLVER_STATUS_ARCH_MISMATCH"<<std::endl; exit(-1); case CUSOLVER_STATUS_EXECUTION_FAILED: std::cout<<"CUSOLVER_STATUS_EXECUTION_FAILED"<<std::endl; exit(-1); case CUSOLVER_STATUS_INTERNAL_ERROR: std::cout<<"CUSOLVER_STATUS_INTERNAL_ERROR"<<std::endl; exit(-1); case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: std::cout<<"CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED"<<std::endl; exit(-1); } } //prints tx2 info relevant to cuda devlopment //citation: this method comes from the nvidia formums, and has been modified slightly void printDeviceProperties(){ std::cout<<"\n---------------START OF DEVICE PROPERTIES---------------\n"<<std::endl; int nDevices; hipGetDeviceCount(&nDevices); //find num of devices on tx2 for (int i = 0; i < nDevices; i++) //print info on each device { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" -Device name: %s\n\n", prop.name); printf(" -Memory\n -Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" -Memory Bus Width (bits): %d\n",prop.memoryBusWidth); printf(" -Peak Memory Bandwidth (GB/s): %f\n",2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6); printf(" -Total Global Memory (bytes): %lo\n", prop.totalGlobalMem); printf(" -Total Const Memory (bytes): %lo\n", prop.totalConstMem); printf(" -Max pitch allowed for memcpy in regions allocated by hipMallocPitch() (bytes): %lo\n\n", prop.memPitch); printf(" -Shared Memory per block (bytes): %lo\n", prop.sharedMemPerBlock); printf(" -Max number of threads per block: %d\n",prop.maxThreadsPerBlock); printf(" -Max number of blocks: 
%dx%dx%d\n",prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); printf(" -32bit Registers per block: %d\n", prop.regsPerBlock); printf(" -Threads per warp: %d\n\n", prop.warpSize); printf(" -Max Threads per Multiprocessor: %d\n", prop.maxThreadsPerMultiProcessor); printf(" -Total number of Multiprocessors: %d\n",prop.multiProcessorCount); printf(" -Shared Memory per Multiprocessor (bytes): %lo\n",prop.sharedMemPerMultiprocessor); printf(" -32bit Registers per Multiprocessor: %d\n", prop.regsPerMultiprocessor); printf(" -Number of asynchronous engines: %d\n", prop.asyncEngineCount); printf(" -Texture alignment requirement (bytes): %lo\n -Texture base addresses that are aligned to " "textureAlignment bytes do not need an offset applied to texture fetches.\n\n", prop.textureAlignment); printf(" -Device Compute Capability:\n -Major revision #: %d\n -Minor revision #: %d\n", prop.major, prop.minor); printf(" -Run time limit for kernels that get executed on this device: "); if(prop.kernelExecTimeoutEnabled) { printf("YES\n"); } else { printf("NO\n"); } printf(" -Device is "); if(prop.integrated) { printf("integrated. (motherboard)\n"); } else { printf("discrete. (card)\n\n"); } if(prop.isMultiGpuBoard) { printf(" -Device is on a MultiGPU configurations.\n\n"); } switch(prop.computeMode) { case(0): printf(" -Default compute mode (Multiple threads can use hipSetDevice() with this device)\n"); break; case(1): printf(" -Compute-exclusive-thread mode (Only one thread in one processwill be able to use\n hipSetDevice() with this device)\n"); break; case(2): printf(" -Compute-prohibited mode (No threads can use hipSetDevice() with this device)\n"); break; case(3): printf(" -Compute-exclusive-process mode (Many threads in one process will be able to use\n hipSetDevice() with this device)\n"); break; default: printf(" -GPU in unknown compute mode.\n"); break; } if(prop.canMapHostMemory) { printf("\n -The device can map host memory into the CUDA address space for use with\n hipHostMalloc() or hipHostGetDevicePointer().\n\n"); } else { printf("\n -The device CANNOT map host memory into the CUDA address space.\n\n"); } printf(" -ECC support: "); if(prop.ECCEnabled) { printf(" ON\n"); } else { printf(" OFF\n"); } printf(" -PCI Bus ID: %d\n", prop.pciBusID); printf(" -PCI Domain ID: %d\n", prop.pciDomainID); printf(" -PCI Device (slot) ID: %d\n", prop.pciDeviceID); printf(" -Using a TCC Driver: "); if(prop.tccDriver) { printf("YES\n"); } else { printf("NO\n"); } } std::cout<<"\n----------------END OF DEVICE PROPERTIES----------------\n"<<std::endl; } /* SIMPLE CUDA METHODS */ __device__ __host__ void printBits(size_t const size, void const * const ptr){ unsigned char *b = (unsigned char*) ptr; unsigned char byte; int i, j; printf("bits - "); for (i=size-1;i>=0;i--){ for (j=7;j>=0;j--){ byte = (b[i] >> j) & 1; printf("%u", byte); } } printf("\n"); }
3e2f81bb684ad46d47d7a89f14968e330421ba59.cu
#include "cuda_util.cuh" /* NVIDIA GPU COMPATIBILITY */ void getGrid(unsigned long numElements, dim3 &grid, int device){ cudaDeviceProp prop; cudaGetDeviceProperties(&prop, device); grid = {(unsigned int)prop.maxGridSize[0],(unsigned int)prop.maxGridSize[1],(unsigned int)prop.maxGridSize[2]}; if(numElements < grid.x){ grid.x = numElements; grid.y = 1; grid.z = 1; } else{ grid.x = 65536; if(numElements < grid.x*grid.y){ grid.y = numElements/grid.x; grid.y++; grid.z = 1; } else if(numElements < grid.x*grid.y*grid.z){ grid.z = numElements/(grid.x*grid.y); grid.z++; } } } void checkDims(dim3 grid, dim3 block, int device){ cudaDeviceProp prop; cudaGetDeviceProperties(&prop, device); bool goodDims = true; if(grid.x > prop.maxGridSize[0]){ goodDims = false; } else if(grid.y > prop.maxGridSize[1]){ goodDims = false; } else if(grid.z > prop.maxGridSize[2]){ goodDims = false; } else if(block.x > prop.maxThreadsDim[0]){ goodDims = false; } else if(block.y > prop.maxThreadsDim[1]){ goodDims = false; } else if(block.z > prop.maxThreadsDim[2]){ goodDims = false; } else if(block.x*block.y*block.z > prop.maxThreadsPerBlock){ goodDims = false; } if(!goodDims){ std::cerr<<"ERROR: grid or block dims are invalid for given device"<<std::endl; exit(-1); //TODO replace with exception and make more specific //maybe make macro like CudaSafeCall() } } __host__ void cusolverCheckError(cusolverStatus_t cusolver_status){ switch (cusolver_status){ case CUSOLVER_STATUS_SUCCESS: std::cout<<"CUSOLVER_SUCCESS"<<std::endl; break; case CUSOLVER_STATUS_NOT_INITIALIZED: std::cout<<"CUSOLVER_STATUS_NOT_INITIALIZED"<<std::endl; exit(-1); case CUSOLVER_STATUS_ALLOC_FAILED: std::cout<<"CUSOLVER_STATUS_ALLOC_FAILED"<<std::endl; exit(-1); case CUSOLVER_STATUS_INVALID_VALUE: std::cout<<"CUSOLVER_STATUS_INVALID_VALUE"<<std::endl; exit(-1); case CUSOLVER_STATUS_ARCH_MISMATCH: std::cout<<"CUSOLVER_STATUS_ARCH_MISMATCH"<<std::endl; exit(-1); case CUSOLVER_STATUS_EXECUTION_FAILED: std::cout<<"CUSOLVER_STATUS_EXECUTION_FAILED"<<std::endl; exit(-1); case CUSOLVER_STATUS_INTERNAL_ERROR: std::cout<<"CUSOLVER_STATUS_INTERNAL_ERROR"<<std::endl; exit(-1); case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: std::cout<<"CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED"<<std::endl; exit(-1); } } //prints tx2 info relevant to cuda devlopment //citation: this method comes from the nvidia formums, and has been modified slightly void printDeviceProperties(){ std::cout<<"\n---------------START OF DEVICE PROPERTIES---------------\n"<<std::endl; int nDevices; cudaGetDeviceCount(&nDevices); //find num of devices on tx2 for (int i = 0; i < nDevices; i++) //print info on each device { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" -Device name: %s\n\n", prop.name); printf(" -Memory\n -Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" -Memory Bus Width (bits): %d\n",prop.memoryBusWidth); printf(" -Peak Memory Bandwidth (GB/s): %f\n",2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6); printf(" -Total Global Memory (bytes): %lo\n", prop.totalGlobalMem); printf(" -Total Const Memory (bytes): %lo\n", prop.totalConstMem); printf(" -Max pitch allowed for memcpy in regions allocated by cudaMallocPitch() (bytes): %lo\n\n", prop.memPitch); printf(" -Shared Memory per block (bytes): %lo\n", prop.sharedMemPerBlock); printf(" -Max number of threads per block: %d\n",prop.maxThreadsPerBlock); printf(" -Max number of blocks: %dx%dx%d\n",prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); 
printf(" -32bit Registers per block: %d\n", prop.regsPerBlock); printf(" -Threads per warp: %d\n\n", prop.warpSize); printf(" -Max Threads per Multiprocessor: %d\n", prop.maxThreadsPerMultiProcessor); printf(" -Total number of Multiprocessors: %d\n",prop.multiProcessorCount); printf(" -Shared Memory per Multiprocessor (bytes): %lo\n",prop.sharedMemPerMultiprocessor); printf(" -32bit Registers per Multiprocessor: %d\n", prop.regsPerMultiprocessor); printf(" -Number of asynchronous engines: %d\n", prop.asyncEngineCount); printf(" -Texture alignment requirement (bytes): %lo\n -Texture base addresses that are aligned to " "textureAlignment bytes do not need an offset applied to texture fetches.\n\n", prop.textureAlignment); printf(" -Device Compute Capability:\n -Major revision #: %d\n -Minor revision #: %d\n", prop.major, prop.minor); printf(" -Run time limit for kernels that get executed on this device: "); if(prop.kernelExecTimeoutEnabled) { printf("YES\n"); } else { printf("NO\n"); } printf(" -Device is "); if(prop.integrated) { printf("integrated. (motherboard)\n"); } else { printf("discrete. (card)\n\n"); } if(prop.isMultiGpuBoard) { printf(" -Device is on a MultiGPU configurations.\n\n"); } switch(prop.computeMode) { case(0): printf(" -Default compute mode (Multiple threads can use cudaSetDevice() with this device)\n"); break; case(1): printf(" -Compute-exclusive-thread mode (Only one thread in one processwill be able to use\n cudaSetDevice() with this device)\n"); break; case(2): printf(" -Compute-prohibited mode (No threads can use cudaSetDevice() with this device)\n"); break; case(3): printf(" -Compute-exclusive-process mode (Many threads in one process will be able to use\n cudaSetDevice() with this device)\n"); break; default: printf(" -GPU in unknown compute mode.\n"); break; } if(prop.canMapHostMemory) { printf("\n -The device can map host memory into the CUDA address space for use with\n cudaHostAlloc() or cudaHostGetDevicePointer().\n\n"); } else { printf("\n -The device CANNOT map host memory into the CUDA address space.\n\n"); } printf(" -ECC support: "); if(prop.ECCEnabled) { printf(" ON\n"); } else { printf(" OFF\n"); } printf(" -PCI Bus ID: %d\n", prop.pciBusID); printf(" -PCI Domain ID: %d\n", prop.pciDomainID); printf(" -PCI Device (slot) ID: %d\n", prop.pciDeviceID); printf(" -Using a TCC Driver: "); if(prop.tccDriver) { printf("YES\n"); } else { printf("NO\n"); } } std::cout<<"\n----------------END OF DEVICE PROPERTIES----------------\n"<<std::endl; } /* SIMPLE CUDA METHODS */ __device__ __host__ void printBits(size_t const size, void const * const ptr){ unsigned char *b = (unsigned char*) ptr; unsigned char byte; int i, j; printf("bits - "); for (i=size-1;i>=0;i--){ for (j=7;j>=0;j--){ byte = (b[i] >> j) & 1; printf("%u", byte); } } printf("\n"); }
415395715e8af3aa1776e9f1eb1ce448cf4e15cf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include "lbann/objective_functions/weight_regularization/l2.hpp" #include "lbann/models/model.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { template <typename TensorDataType, El::Int block_size> __global__ void accumulate_contribution_kernel(El::Int height, El::Int width, const TensorDataType * __restrict__ vals, El::Int vals_ldim, DataType * __restrict__ contribution) { // Indices const El::Int tid = threadIdx.x; const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x; const El::Int nthreads = blockDim.x * gridDim.x; // Compute contributions for each thread TensorDataType private_contribution = 0; const auto& size = height * width; for (El::Int i = gid; i < size; i += nthreads) { const auto& row = i % height; const auto& col = i / height; const auto& val = vals[row + col * vals_ldim]; private_contribution += val * val; } // Shared memory reduction to get contribution for each block /// @todo unroll loops __shared__ TensorDataType shared_contribution[block_size]; shared_contribution[tid] = private_contribution; for (El::Int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if (tid < stride) { shared_contribution[tid] += shared_contribution[tid + stride]; } } if (tid == 0) { gpu_lib::atomic_add(contribution, shared_contribution[0]); } } } // namespace template <> void l2_weight_regularization::accumulate_contribution<El::Device::GPU>( const El::Matrix<AccumulateDataType, El::Device::GPU>& vals, El::Matrix<AccumulateDataType, El::Device::GPU>& contribution) { if (!vals.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(contribution), gpu::get_sync_info(vals)); const auto& size = vals.Height() * vals.Width(); const El::Int block_size = 256; const auto& grid_size = (size + block_size - 1) / block_size; hydrogen::gpu::LaunchKernel( accumulate_contribution_kernel<AccumulateDataType, block_size>, grid_size, block_size, 0, multisync, vals.Height(), vals.Width(), vals.LockedBuffer(), vals.LDim(), contribution.Buffer()); } } } // namespace lbann
415395715e8af3aa1776e9f1eb1ce448cf4e15cf.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include "lbann/objective_functions/weight_regularization/l2.hpp" #include "lbann/models/model.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { template <typename TensorDataType, El::Int block_size> __global__ void accumulate_contribution_kernel(El::Int height, El::Int width, const TensorDataType * __restrict__ vals, El::Int vals_ldim, DataType * __restrict__ contribution) { // Indices const El::Int tid = threadIdx.x; const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x; const El::Int nthreads = blockDim.x * gridDim.x; // Compute contributions for each thread TensorDataType private_contribution = 0; const auto& size = height * width; for (El::Int i = gid; i < size; i += nthreads) { const auto& row = i % height; const auto& col = i / height; const auto& val = vals[row + col * vals_ldim]; private_contribution += val * val; } // Shared memory reduction to get contribution for each block /// @todo unroll loops __shared__ TensorDataType shared_contribution[block_size]; shared_contribution[tid] = private_contribution; for (El::Int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if (tid < stride) { shared_contribution[tid] += shared_contribution[tid + stride]; } } if (tid == 0) { gpu_lib::atomic_add(contribution, shared_contribution[0]); } } } // namespace template <> void l2_weight_regularization::accumulate_contribution<El::Device::GPU>( const El::Matrix<AccumulateDataType, El::Device::GPU>& vals, El::Matrix<AccumulateDataType, El::Device::GPU>& contribution) { if (!vals.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(contribution), gpu::get_sync_info(vals)); const auto& size = vals.Height() * vals.Width(); const El::Int block_size = 256; const auto& grid_size = (size + block_size - 1) / block_size; hydrogen::gpu::LaunchKernel( accumulate_contribution_kernel<AccumulateDataType, block_size>, grid_size, block_size, 0, multisync, vals.Height(), vals.Width(), vals.LockedBuffer(), vals.LDim(), contribution.Buffer()); } } } // namespace lbann
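This pair is nearly identical on both sides: the hip file only adds the hipify banner and #include "hip/hip_runtime.h", because the kernel is launched through hydrogen::gpu::LaunchKernel rather than a raw triple-chevron, so there is no launch syntax for the tool to rewrite. The kernel itself is a standard grid-stride sum of squares with a shared-memory block reduction and one atomic add per block; a standalone sketch of that pattern follows (sum_sq_kernel, the float accumulator and the input size are illustrative, not LBANN symbols).

// Standalone sketch of the block-reduction-plus-atomic pattern used by
// accumulate_contribution_kernel above; sum_sq_kernel is an illustrative name.
#include <cstdio>
#include <cuda_runtime.h>

template <int BLOCK>  // BLOCK must be a power of two, like the 256 used above
__global__ void sum_sq_kernel(const float *vals, int n, float *result) {
  __shared__ float shared[BLOCK];
  float acc = 0.f;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    acc += vals[i] * vals[i];             // per-thread partial sum of squares
  }
  shared[threadIdx.x] = acc;
  for (int stride = BLOCK / 2; stride > 0; stride /= 2) {
    __syncthreads();                      // same loop shape as the kernel above
    if (threadIdx.x < stride) shared[threadIdx.x] += shared[threadIdx.x + stride];
  }
  if (threadIdx.x == 0) atomicAdd(result, shared[0]);  // one atomic per block
}

int main() {
  const int n = 1 << 20;
  float *vals = nullptr, *result = nullptr;
  cudaMalloc(&vals, n * sizeof(float));
  cudaMemset(vals, 0, n * sizeof(float));
  cudaMalloc(&result, sizeof(float));
  cudaMemset(result, 0, sizeof(float));
  constexpr int BLOCK = 256;
  sum_sq_kernel<BLOCK><<<(n + BLOCK - 1) / BLOCK, BLOCK>>>(vals, n, result);
  float host = 0.f;
  cudaMemcpy(&host, result, sizeof(float), cudaMemcpyDeviceToHost);
  printf("sum of squares = %f\n", host);
  cudaFree(vals);
  cudaFree(result);
  return 0;
}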
703299e8b9b50fc9b54e2aec3e3df3a849437954.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "kernel.h" #include "reducedMathPlugin.h" #include <iostream> using nvinfer1::plugin::reduced_divisor; template <unsigned nthdsPerCTA> __launch_bounds__(nthdsPerCTA) __global__ void gridAnchorKernel(const GridAnchorParameters param, const int numAspectRatios, reduced_divisor divObj, const float* widths, const float* heights, float* outputData) { // output dims: (H, W, param.numMinSize, (1+haveMaxSize+numAR-1), 4) const int dim = param.H * param.W * numAspectRatios; /* * Parameters used to calculate the bounding box coordinates back to input image scale * Normally we calculate the anchorStride = image_input_size (in pixel) / feature_map_size * Here we do not use image_input_size for the moment * Instead we use 1.0 * The coordinates calculated are scaled by the input image size. * Most of the coordinates will be in a range of [0, 1], except for the bounding box coordinates going outside of * the image Every coordinate will go back to the pixel coordinates in the input image if being multiplied by * image_input_size Here we implicitly assumes the image input and feature map are square */ float anchorStride = (1.0 / param.H); float anchorOffset = 0.5 * anchorStride; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= dim) return; int arId, currIndex; divObj.divmod(tid, currIndex, arId); const int w = currIndex % param.W; const int h = currIndex / param.W; // Center coordinates float yC = h * anchorStride + anchorOffset; float xC = w * anchorStride + anchorOffset; // x_min, y_min float xMin = xC - 0.5 * widths[arId]; float yMin = yC - 0.5 * heights[arId]; // x_max, y_max float xMax = xC + 0.5 * widths[arId]; float yMax = yC + 0.5 * heights[arId]; outputData[tid * 4] = xMin; outputData[tid * 4 + 1] = yMin; outputData[tid * 4 + 2] = xMax; outputData[tid * 4 + 3] = yMax; // Remember to move the output cursor float* output = outputData + dim * 4; // Simply copying the variance output[tid * 4] = param.variance[0]; output[tid * 4 + 1] = param.variance[1]; output[tid * 4 + 2] = param.variance[2]; output[tid * 4 + 3] = param.variance[3]; } pluginStatus_t anchorGridInference(hipStream_t stream, const GridAnchorParameters param, const int numAspectRatios, const void* widths, const void* heights, void* outputData) { const int dim = param.H * param.W * numAspectRatios; reduced_divisor divObj(numAspectRatios); if (dim > 5120) { const int BS = 128; const int GS = (dim + BS - 1) / BS; hipLaunchKernelGGL(( gridAnchorKernel<BS>), dim3(GS), dim3(BS), 0, stream, param, numAspectRatios, divObj, (const float*) widths, (const float*) heights, (float*) outputData); } else { const int BS = 32; const int GS = (dim + BS - 1) / BS; hipLaunchKernelGGL(( gridAnchorKernel<BS>), dim3(GS), dim3(BS), 0, stream, param, numAspectRatios, divObj, (const float*) widths, (const float*) heights, (float*) outputData); } 
CSC(hipGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } namespace nvinfer1 { namespace plugin { pluginStatus_t anchorGridInference(hipStream_t stream, const GridAnchorParameters param, const int numAspectRatios, const void* widths, const void* heights, void* outputData) { const int dim = param.H * param.W * numAspectRatios; reduced_divisor divObj(numAspectRatios); if (dim > 5120) { const int BS = 128; const int GS = (dim + BS - 1) / BS; hipLaunchKernelGGL(( gridAnchorKernel<BS>), dim3(GS), dim3(BS), 0, stream, param, numAspectRatios, divObj, (const float*) widths, (const float*) heights, (float*) outputData); } else { const int BS = 32; const int GS = (dim + BS - 1) / BS; hipLaunchKernelGGL(( gridAnchorKernel<BS>), dim3(GS), dim3(BS), 0, stream, param, numAspectRatios, divObj, (const float*) widths, (const float*) heights, (float*) outputData); } CSC(hipGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } } // namespace plugin } // namespace nvinfer1
703299e8b9b50fc9b54e2aec3e3df3a849437954.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "kernel.h" #include "reducedMathPlugin.h" #include <iostream> using nvinfer1::plugin::reduced_divisor; template <unsigned nthdsPerCTA> __launch_bounds__(nthdsPerCTA) __global__ void gridAnchorKernel(const GridAnchorParameters param, const int numAspectRatios, reduced_divisor divObj, const float* widths, const float* heights, float* outputData) { // output dims: (H, W, param.numMinSize, (1+haveMaxSize+numAR-1), 4) const int dim = param.H * param.W * numAspectRatios; /* * Parameters used to calculate the bounding box coordinates back to input image scale * Normally we calculate the anchorStride = image_input_size (in pixel) / feature_map_size * Here we do not use image_input_size for the moment * Instead we use 1.0 * The coordinates calculated are scaled by the input image size. * Most of the coordinates will be in a range of [0, 1], except for the bounding box coordinates going outside of * the image Every coordinate will go back to the pixel coordinates in the input image if being multiplied by * image_input_size Here we implicitly assumes the image input and feature map are square */ float anchorStride = (1.0 / param.H); float anchorOffset = 0.5 * anchorStride; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= dim) return; int arId, currIndex; divObj.divmod(tid, currIndex, arId); const int w = currIndex % param.W; const int h = currIndex / param.W; // Center coordinates float yC = h * anchorStride + anchorOffset; float xC = w * anchorStride + anchorOffset; // x_min, y_min float xMin = xC - 0.5 * widths[arId]; float yMin = yC - 0.5 * heights[arId]; // x_max, y_max float xMax = xC + 0.5 * widths[arId]; float yMax = yC + 0.5 * heights[arId]; outputData[tid * 4] = xMin; outputData[tid * 4 + 1] = yMin; outputData[tid * 4 + 2] = xMax; outputData[tid * 4 + 3] = yMax; // Remember to move the output cursor float* output = outputData + dim * 4; // Simply copying the variance output[tid * 4] = param.variance[0]; output[tid * 4 + 1] = param.variance[1]; output[tid * 4 + 2] = param.variance[2]; output[tid * 4 + 3] = param.variance[3]; } pluginStatus_t anchorGridInference(cudaStream_t stream, const GridAnchorParameters param, const int numAspectRatios, const void* widths, const void* heights, void* outputData) { const int dim = param.H * param.W * numAspectRatios; reduced_divisor divObj(numAspectRatios); if (dim > 5120) { const int BS = 128; const int GS = (dim + BS - 1) / BS; gridAnchorKernel<BS><<<GS, BS, 0, stream>>>( param, numAspectRatios, divObj, (const float*) widths, (const float*) heights, (float*) outputData); } else { const int BS = 32; const int GS = (dim + BS - 1) / BS; gridAnchorKernel<BS><<<GS, BS, 0, stream>>>( param, numAspectRatios, divObj, (const float*) widths, (const float*) heights, (float*) outputData); } CSC(cudaGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } namespace nvinfer1 { namespace plugin { pluginStatus_t anchorGridInference(cudaStream_t 
stream, const GridAnchorParameters param, const int numAspectRatios, const void* widths, const void* heights, void* outputData) { const int dim = param.H * param.W * numAspectRatios; reduced_divisor divObj(numAspectRatios); if (dim > 5120) { const int BS = 128; const int GS = (dim + BS - 1) / BS; gridAnchorKernel<BS><<<GS, BS, 0, stream>>>( param, numAspectRatios, divObj, (const float*) widths, (const float*) heights, (float*) outputData); } else { const int BS = 32; const int GS = (dim + BS - 1) / BS; gridAnchorKernel<BS><<<GS, BS, 0, stream>>>( param, numAspectRatios, divObj, (const float*) widths, (const float*) heights, (float*) outputData); } CSC(cudaGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } } // namespace plugin } // namespace nvinfer1
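The .hip file above is the hipify translation of this .cu source: the triple-chevron launch gridAnchorKernel<BS><<<GS, BS, 0, stream>>> is rewritten as hipLaunchKernelGGL((gridAnchorKernel<BS>), dim3(GS), dim3(BS), 0, stream, ...), and cudaGetLastError() becomes hipGetLastError(); the kernel body itself is unchanged. The following is a minimal, self-contained CUDA sketch of the same ceil-division grid sizing and block-size heuristic used by anchorGridInference; scaleKernel and launchScale are illustrative names, not part of the plugin.

#include <cuda_runtime.h>

// Illustrative kernel: one thread per output element, as in gridAnchorKernel.
__global__ void scaleKernel(float* out, int n, float s)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n)
        return;
    out[tid] = s * tid;
}

// Same launch pattern as anchorGridInference: a wider block for large problem
// sizes, then a grid rounded up with ceil division so every element is covered.
void launchScale(cudaStream_t stream, float* out, int n, float s)
{
    const int BS = (n > 5120) ? 128 : 32;
    const int GS = (n + BS - 1) / BS;
    scaleKernel<<<GS, BS, 0, stream>>>(out, n, s);
}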
f7ea659bb5ad5f153fa14aabfbc315c1d0439c28.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void kSqrt(float* mat, float* target, unsigned int len) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int numThreads = blockDim.x * gridDim.x;

    for (unsigned int i = idx; i < len; i += numThreads)
        target[i] = sqrt(mat[i]);
}
f7ea659bb5ad5f153fa14aabfbc315c1d0439c28.cu
#include "includes.h" __global__ void kSqrt(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) target[i] = sqrt(mat[i]); }
de545fb2c5bf46ae2f2d70bef8d65fd7618bc2eb.hip
// !!! This is a file automatically generated by hipify!!! /*The MIT License (MIT) Copyright (c) [2015] [Sawyer Hopkins] Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.*/ #include "LJPotential.h" using namespace simulation; LennardJones::~LennardJones() { delete[] &cutOff; delete[] &debyeLength; delete[] &kT; delete[] &radius; delete[] &yukStr; delete[] &mass; delete[] &ljNum; } __device__ LennardJones::LennardJones(float * vars) { kT = vars[0]; //Get the radius radius = vars[1]; //Get the mass mass = vars[2]; //Get the well depth yukStr = vars[3]; //Get the well depth ljNum = vars[4]; //Get the cutoff range cutOff = vars[5]; cutOffSquared = cutOff*cutOff; //Get the debye length for the system. debyeLength = vars[6]; debyeInv = 1.0 / debyeLength; output = true; } LennardJones::LennardJones(configReader::config* cfg) { //Sets the name //name = "Lennard Jones"; kT = cfg->getParam<float>("kT", 10.0); //Get the radius radius = cfg->getParam<float>("radius",0.5); //Get the mass mass = cfg->getParam<float>("mass",1.0); //Get the well depth yukStr = cfg->getParam<float>("yukawaStrength",8.0); //Get the well depth ljNum = cfg->getParam<int>("ljNum",18.0); //Get the cutoff range cutOff = cfg->getParam<float>("cutOff",2.5); cutOffSquared = cutOff*cutOff; //Get the debye length for the system. debyeLength = cfg->getParam<float>("debyeLength",0.5); debyeInv = 1.0 / debyeLength; output = true; size = sizeof(*this); utilities::util::writeTerminal("---Lennard Jones Potential successfully added.\n\n", utilities::Colour::Cyan); } __device__ void LennardJones::cudaTest() { printf("kT: %f\n",kT); printf("radius: %f\n",radius); printf("mass: %f\n",mass); printf("yukStr: %f\n",yukStr); printf("ljNum: %d\n",ljNum); printf("cutoff: %f\n",cutOff); printf("debyeLength: %f\n",debyeLength); } __device__ void LennardJones::iterCells(int* boxSize, particle* index, cell* itemCell) { float pot = 0; int i = 0; int max = itemCell->gridCounter; while (i < max) { particle* it = itemCell->members[i]; if (it->getName() != index->getName()) { //Distance between the two particles. float rSquared = utilities::util::pbcDist(index->getX(), index->getY(), index->getZ(), it->getX(), it->getY(), it->getZ(), *boxSize); //If the particles are in the potential well. if (rSquared < cutOffSquared) { float r = sqrt(rSquared); //If the particles overlap there are problems. 
float size = (index->getRadius() + it->getRadius()); if(r< (0.8*size) ) { debugging::error::throwParticleOverlapError(index->getName(), it->getName(), r); } //------------------------------------- //-----------FORCE CALCULATION--------- //------------------------------------- //Predefinitions. float RadiusOverR = (size / r); float rOverDebye = (r * debyeInv); float rInv = (1.0 / r); float DebyeShift = (debyeLength + r); float yukExp = ::exp(-rOverDebye); //float LJ = ::pow(RadiusOverR,ljNum); float LJ = utilities::util::powBinaryDecomp(RadiusOverR,ljNum); //Attractive LJ. float attract = ((2.0*LJ) - 1.0); attract *= (4.0*ljNum*rInv*LJ); //Repulsive Yukawa. float repel = yukExp; repel *= (rInv*rInv*DebyeShift*yukStr); float fNet = -kT*(attract+repel); /* if ((fNet*fNet) > (80*80)) { printf("%f %d %d %d %d %f %f --- i: %d, %d, %d --- j: %d, %d, %d\n", fNet, it->getName(), index->getName(), i, max, rSquared, cutOffSquared, index->getCX(), index->getCY(), index->getCZ(), it->getCX(), it->getCY(), it->getCZ()); } */ //Positive is attractive; Negative repulsive. //fNet = -fNet; //------------------------------------- //---------POTENTIAL CALCULATION------- //------------------------------------- if (r < 1.1) { float ljPot = (LJ - 1.0); ljPot *= (4.0*LJ); float yukPot = yukExp; yukPot *= (debyeLength*yukStr*rInv); pot += (kT*yukPot); pot += (kT*ljPot); } //------------------------------------- //------NORMALIZATION AND SETTING------ //------------------------------------- //Normalize the force. float unitVec[3] {0.0,0.0,0.0}; utilities::util::unitVectorAdv(index->getX(), index->getY(), index->getZ(), it->getX(), it->getY(), it->getZ(), unitVec, r, *boxSize); //Updates the acceleration.; float fx = fNet*unitVec[0]; float fy = fNet*unitVec[1]; float fz = fNet*unitVec[2]; //If the force is infinite then there are worse problems. if (isnan(fNet)) { //This error should only get thrown in the case of numerical instability. debugging::error::throwInfiniteForce(); } //Add to the net force on the particle. if (r < 1.1) { index->updateForce(fx,fy,fz,pot,it); } else { index->updateForce(fx,fy,fz,pot,it,false); } } } i++; } } __device__ void LennardJones::getAcceleration(int* nPart, int* boxSize, int* cellScale ,float* time, simulation::cell* cells, simulation::particle* items) { int index = (blockDim.x * blockIdx.x) + threadIdx.x; if (index >= *nPart) return; int cellIndex = items[index].getCX() + (items[index].getCY() * (*cellScale)) + (items[index].getCZ() * (*cellScale) * (*cellScale)); items[index].nextIter(); int i = 0; while (i < 27) { iterCells(boxSize, &(items[index]), cells[cellIndex].getNeighbor(i)); i++; } }
de545fb2c5bf46ae2f2d70bef8d65fd7618bc2eb.cu
/*The MIT License (MIT) Copyright (c) [2015] [Sawyer Hopkins] Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.*/ #include "LJPotential.h" using namespace simulation; LennardJones::~LennardJones() { delete[] &cutOff; delete[] &debyeLength; delete[] &kT; delete[] &radius; delete[] &yukStr; delete[] &mass; delete[] &ljNum; } __device__ LennardJones::LennardJones(float * vars) { kT = vars[0]; //Get the radius radius = vars[1]; //Get the mass mass = vars[2]; //Get the well depth yukStr = vars[3]; //Get the well depth ljNum = vars[4]; //Get the cutoff range cutOff = vars[5]; cutOffSquared = cutOff*cutOff; //Get the debye length for the system. debyeLength = vars[6]; debyeInv = 1.0 / debyeLength; output = true; } LennardJones::LennardJones(configReader::config* cfg) { //Sets the name //name = "Lennard Jones"; kT = cfg->getParam<float>("kT", 10.0); //Get the radius radius = cfg->getParam<float>("radius",0.5); //Get the mass mass = cfg->getParam<float>("mass",1.0); //Get the well depth yukStr = cfg->getParam<float>("yukawaStrength",8.0); //Get the well depth ljNum = cfg->getParam<int>("ljNum",18.0); //Get the cutoff range cutOff = cfg->getParam<float>("cutOff",2.5); cutOffSquared = cutOff*cutOff; //Get the debye length for the system. debyeLength = cfg->getParam<float>("debyeLength",0.5); debyeInv = 1.0 / debyeLength; output = true; size = sizeof(*this); utilities::util::writeTerminal("---Lennard Jones Potential successfully added.\n\n", utilities::Colour::Cyan); } __device__ void LennardJones::cudaTest() { printf("kT: %f\n",kT); printf("radius: %f\n",radius); printf("mass: %f\n",mass); printf("yukStr: %f\n",yukStr); printf("ljNum: %d\n",ljNum); printf("cutoff: %f\n",cutOff); printf("debyeLength: %f\n",debyeLength); } __device__ void LennardJones::iterCells(int* boxSize, particle* index, cell* itemCell) { float pot = 0; int i = 0; int max = itemCell->gridCounter; while (i < max) { particle* it = itemCell->members[i]; if (it->getName() != index->getName()) { //Distance between the two particles. float rSquared = utilities::util::pbcDist(index->getX(), index->getY(), index->getZ(), it->getX(), it->getY(), it->getZ(), *boxSize); //If the particles are in the potential well. if (rSquared < cutOffSquared) { float r = sqrt(rSquared); //If the particles overlap there are problems. 
float size = (index->getRadius() + it->getRadius()); if(r< (0.8*size) ) { debugging::error::throwParticleOverlapError(index->getName(), it->getName(), r); } //------------------------------------- //-----------FORCE CALCULATION--------- //------------------------------------- //Predefinitions. float RadiusOverR = (size / r); float rOverDebye = (r * debyeInv); float rInv = (1.0 / r); float DebyeShift = (debyeLength + r); float yukExp = std::exp(-rOverDebye); //float LJ = std::pow(RadiusOverR,ljNum); float LJ = utilities::util::powBinaryDecomp(RadiusOverR,ljNum); //Attractive LJ. float attract = ((2.0*LJ) - 1.0); attract *= (4.0*ljNum*rInv*LJ); //Repulsive Yukawa. float repel = yukExp; repel *= (rInv*rInv*DebyeShift*yukStr); float fNet = -kT*(attract+repel); /* if ((fNet*fNet) > (80*80)) { printf("%f %d %d %d %d %f %f --- i: %d, %d, %d --- j: %d, %d, %d\n", fNet, it->getName(), index->getName(), i, max, rSquared, cutOffSquared, index->getCX(), index->getCY(), index->getCZ(), it->getCX(), it->getCY(), it->getCZ()); } */ //Positive is attractive; Negative repulsive. //fNet = -fNet; //------------------------------------- //---------POTENTIAL CALCULATION------- //------------------------------------- if (r < 1.1) { float ljPot = (LJ - 1.0); ljPot *= (4.0*LJ); float yukPot = yukExp; yukPot *= (debyeLength*yukStr*rInv); pot += (kT*yukPot); pot += (kT*ljPot); } //------------------------------------- //------NORMALIZATION AND SETTING------ //------------------------------------- //Normalize the force. float unitVec[3] {0.0,0.0,0.0}; utilities::util::unitVectorAdv(index->getX(), index->getY(), index->getZ(), it->getX(), it->getY(), it->getZ(), unitVec, r, *boxSize); //Updates the acceleration.; float fx = fNet*unitVec[0]; float fy = fNet*unitVec[1]; float fz = fNet*unitVec[2]; //If the force is infinite then there are worse problems. if (isnan(fNet)) { //This error should only get thrown in the case of numerical instability. debugging::error::throwInfiniteForce(); } //Add to the net force on the particle. if (r < 1.1) { index->updateForce(fx,fy,fz,pot,it); } else { index->updateForce(fx,fy,fz,pot,it,false); } } } i++; } } __device__ void LennardJones::getAcceleration(int* nPart, int* boxSize, int* cellScale ,float* time, simulation::cell* cells, simulation::particle* items) { int index = (blockDim.x * blockIdx.x) + threadIdx.x; if (index >= *nPart) return; int cellIndex = items[index].getCX() + (items[index].getCY() * (*cellScale)) + (items[index].getCZ() * (*cellScale) * (*cellScale)); items[index].nextIter(); int i = 0; while (i < 27) { iterCells(boxSize, &(items[index]), cells[cellIndex].getNeighbor(i)); i++; } }
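The force law in iterCells combines a steep inverse-power attraction controlled by ljNum with a screened (Yukawa) repulsion, both scaled by kT. Re-expressed on the host with the same arithmetic (size is the sum of the two particle radii and LJ is (size/r)^ljNum), the sketch below mirrors the kernel line by line; it is a checking aid, not part of the simulation API.

#include <cmath>

// Host-side restatement of the pair force computed in LennardJones::iterCells.
// Variable names mirror the kernel.
float pairForce(float r, float size, float kT, float yukStr,
                float debyeLength, int ljNum)
{
    const float rInv    = 1.0f / r;
    const float LJ      = std::pow(size * rInv, (float) ljNum);  // (size/r)^ljNum
    const float attract = 4.0f * ljNum * rInv * LJ * (2.0f * LJ - 1.0f);
    const float yukExp  = std::exp(-r / debyeLength);
    const float repel   = yukExp * rInv * rInv * (debyeLength + r) * yukStr;
    return -kT * (attract + repel);                               // fNet in the kernel
}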
cb88cab594a055be7f7de11ad2c14ca68303421c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

__global__ void compute(float comp, float var_1, float var_2, float var_3, float var_4, float var_5, float var_6, float var_7, float var_8, float var_9, float var_10, float var_11, float var_12, float var_13, float var_14) {
   if (comp < (var_1 / (-1.2389E-44f / asinf(var_2 / +1.4917E35f + var_3 * (var_4 - (var_5 - +1.2951E-43f)))))) {
      float tmp_1 = -1.5543E-35f;
      float tmp_2 = var_6 - var_7 + atanf((+1.9958E7f / var_8 / (var_9 + -1.8601E-44f - -1.0522E-37f)));
      comp = tmp_2 * tmp_1 - (var_10 + var_11 / sqrtf((var_12 / (var_13 / var_14))));
   }
   printf("%.17g\n", comp);
}

float* initPointer(float v) {
   float *ret = (float*) malloc(sizeof(float)*10);
   for (int i = 0; i < 10; ++i)
      ret[i] = v;
   return ret;
}

int main(int argc, char** argv) {
   /* Program variables */
   float tmp_1 = atof(argv[1]);
   float tmp_2 = atof(argv[2]);
   float tmp_3 = atof(argv[3]);
   float tmp_4 = atof(argv[4]);
   float tmp_5 = atof(argv[5]);
   float tmp_6 = atof(argv[6]);
   float tmp_7 = atof(argv[7]);
   float tmp_8 = atof(argv[8]);
   float tmp_9 = atof(argv[9]);
   float tmp_10 = atof(argv[10]);
   float tmp_11 = atof(argv[11]);
   float tmp_12 = atof(argv[12]);
   float tmp_13 = atof(argv[13]);
   float tmp_14 = atof(argv[14]);
   float tmp_15 = atof(argv[15]);

   hipLaunchKernelGGL((compute), dim3(1), dim3(1), 0, 0, tmp_1, tmp_2, tmp_3, tmp_4, tmp_5, tmp_6, tmp_7, tmp_8, tmp_9, tmp_10, tmp_11, tmp_12, tmp_13, tmp_14, tmp_15);
   hipDeviceSynchronize();

   return 0;
}
cb88cab594a055be7f7de11ad2c14ca68303421c.cu
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

__global__ void compute(float comp, float var_1, float var_2, float var_3, float var_4, float var_5, float var_6, float var_7, float var_8, float var_9, float var_10, float var_11, float var_12, float var_13, float var_14) {
   if (comp < (var_1 / (-1.2389E-44f / asinf(var_2 / +1.4917E35f + var_3 * (var_4 - (var_5 - +1.2951E-43f)))))) {
      float tmp_1 = -1.5543E-35f;
      float tmp_2 = var_6 - var_7 + atanf((+1.9958E7f / var_8 / (var_9 + -1.8601E-44f - -1.0522E-37f)));
      comp = tmp_2 * tmp_1 - (var_10 + var_11 / sqrtf((var_12 / (var_13 / var_14))));
   }
   printf("%.17g\n", comp);
}

float* initPointer(float v) {
   float *ret = (float*) malloc(sizeof(float)*10);
   for (int i = 0; i < 10; ++i)
      ret[i] = v;
   return ret;
}

int main(int argc, char** argv) {
   /* Program variables */
   float tmp_1 = atof(argv[1]);
   float tmp_2 = atof(argv[2]);
   float tmp_3 = atof(argv[3]);
   float tmp_4 = atof(argv[4]);
   float tmp_5 = atof(argv[5]);
   float tmp_6 = atof(argv[6]);
   float tmp_7 = atof(argv[7]);
   float tmp_8 = atof(argv[8]);
   float tmp_9 = atof(argv[9]);
   float tmp_10 = atof(argv[10]);
   float tmp_11 = atof(argv[11]);
   float tmp_12 = atof(argv[12]);
   float tmp_13 = atof(argv[13]);
   float tmp_14 = atof(argv[14]);
   float tmp_15 = atof(argv[15]);

   compute<<<1,1>>>(tmp_1, tmp_2, tmp_3, tmp_4, tmp_5, tmp_6, tmp_7, tmp_8, tmp_9, tmp_10, tmp_11, tmp_12, tmp_13, tmp_14, tmp_15);
   cudaDeviceSynchronize();

   return 0;
}
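This pair is an auto-generated floating-point test: main forwards fifteen command-line floats to a single-thread kernel, and the result is printed with device-side printf, whose buffered output is only guaranteed to appear once the host synchronizes. The same pattern in isolation, with a toy kernel that is not part of the generated test:

#include <cstdio>
#include <cuda_runtime.h>

// Toy illustration of the device-printf pattern used above.
__global__ void printValue(float v)
{
    printf("%.17g\n", v);
}

int main()
{
    printValue<<<1, 1>>>(3.14f);
    cudaDeviceSynchronize();  // flushes the device printf buffer before exit
    return 0;
}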
725e30ac283a16ca5a58b17e1ee22d3d70290e63.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "pmpp__convolution.h" __global__ void pmpp__2d_convolution_kernel(double *input, double *output, const int width, const int height, const double* __restrict__ mask, const int mask_width){ extern __shared__ double shared[]; const int O_TILE_WIDTH = blockDim.x; //The output tile width is equal to the blockDim.x const int SHARED_STRIDE = blockDim.x + mask_width - 1; //The stride to the linearized shared memory array int tx = threadIdx.x; int ty = threadIdx.y; //The output array coordinates int row_o = blockIdx.y*O_TILE_WIDTH + ty; int col_o = blockIdx.x*O_TILE_WIDTH + tx; //The input array coordinates int row_i = row_o - mask_width/2; int col_i = col_o - mask_width/2; //Loads the input values or zero(to the ghosts and halo cells) if(row_i >= 0 && row_i < height && col_i >=0 && col_i < width){ shared[ty*SHARED_STRIDE + tx] = input[row_i * width + col_i]; }else{ shared[ty*SHARED_STRIDE + tx] = 0.0; } __syncthreads(); double value = 0.0; if(ty < O_TILE_WIDTH && tx < O_TILE_WIDTH){ //Check if the thread index is into the output tile width for(int i = 0; i < mask_width; i++){ // Gets the rows and cols of the mask for(int j = 0; j < mask_width; j++){ value += mask[i*mask_width + j] * shared[(i+ty)*SHARED_STRIDE + (j+tx)]; // Performs the convolution } } if(row_o < height && col_o < width){ //Check if thread indexes are into the output domain output[row_o*width + col_o] = value; } } } void pmpp__2d_convolution_host(double *input, double *output, const int width, const int height, const double *mask, const int mask_width){ int ghosts_by_side = mask_width/2; double sum; int input_row, input_col; for(int output_row = 0; output_row < height; output_row++){ // Iterates through each output position (row and col) to calculate it for(int output_col = 0; output_col < width; output_col++){ sum = 0; for(int mask_row = 0; mask_row < mask_width; mask_row++){ // Iterates through each mask position (row) to get it input_row = output_row - ghosts_by_side + mask_row; //Calculates the input row index to be accessed if(input_row >= 0 && input_row < height ){ //Checks if the row has input values or only ghosts for(int mask_col = 0; mask_col < mask_width; mask_col++){ // Iterates through each mask position (col) to get it input_col = output_col - ghosts_by_side + mask_col; //Calculates the input col index to be accessed if(input_col >= 0 && input_col < width){ //Checks if the col of the current row is a input value or a ghost cell sum += input[input_row*width + input_col]*mask[mask_row*mask_width + mask_col]; //Performs the convolution } } output[output_row*width + output_col] = sum; } } } } } void pmpp__1d_convolution_host(double *input, double *output, const int length, const double *mask, const int mask_width){ int ghosts_by_side = mask_width/2; double sum; int input_idx; for(int out_idx = 0; out_idx < length; out_idx++){ // Iterates through each output position to calculate it sum = 0; for(int mask_idx = 0; mask_idx < mask_width; mask_idx++){ // Iterates through each mask position input_idx = out_idx - ghosts_by_side + mask_idx; // Calculates the input index if(input_idx >= 0 && input_idx < length){ // Check if the input index is not a ghost sum+=input[input_idx]*mask[mask_idx]; //Performs the convolution } } output[out_idx] = sum; } } __global__ void pmpp__1d_convolution_kernel(double *input, double *output, const int length, double *mask, const int mask_width){ int tid = blockIdx.x*blockDim.x + threadIdx.x; extern 
__shared__ double shared[]; // Each thread loads data from global to the block shared memory shared[threadIdx.x] = tid < length ? input[tid] : 0.0; __syncthreads(); // Defines the data index that belongs to each tile int this_tile_start_point = blockIdx.x * blockDim.x; int next_tile_start_point = (blockIdx.x + 1) * blockDim.x; // Go back int(mask_width/2) positions in order to start from the block scope external cells (halos or ghosts placed before the this_tile_start_point position) int n_start_point = tid - (mask_width/2); double p = 0; for(int j = 0; j < mask_width; j++){ int n_index = n_start_point + j; if(n_index >= 0 && n_index < length){ //Check if the n_index not refers to a ghost cell if(n_index >= this_tile_start_point && n_index < next_tile_start_point){ // If is an internal cell (true) or a halo cell (false) p += shared[threadIdx.x + j - mask_width/2]*mask[j]; }else{ p += input[n_index] * mask[j]; //Takes the N[value] from the cache (Luckily!) or from the global memory and performs the convolution } } } output[tid] = p; }
725e30ac283a16ca5a58b17e1ee22d3d70290e63.cu
#include "pmpp__convolution.h" __global__ void pmpp__2d_convolution_kernel(double *input, double *output, const int width, const int height, const double* __restrict__ mask, const int mask_width){ extern __shared__ double shared[]; const int O_TILE_WIDTH = blockDim.x; //The output tile width is equal to the blockDim.x const int SHARED_STRIDE = blockDim.x + mask_width - 1; //The stride to the linearized shared memory array int tx = threadIdx.x; int ty = threadIdx.y; //The output array coordinates int row_o = blockIdx.y*O_TILE_WIDTH + ty; int col_o = blockIdx.x*O_TILE_WIDTH + tx; //The input array coordinates int row_i = row_o - mask_width/2; int col_i = col_o - mask_width/2; //Loads the input values or zero(to the ghosts and halo cells) if(row_i >= 0 && row_i < height && col_i >=0 && col_i < width){ shared[ty*SHARED_STRIDE + tx] = input[row_i * width + col_i]; }else{ shared[ty*SHARED_STRIDE + tx] = 0.0; } __syncthreads(); double value = 0.0; if(ty < O_TILE_WIDTH && tx < O_TILE_WIDTH){ //Check if the thread index is into the output tile width for(int i = 0; i < mask_width; i++){ // Gets the rows and cols of the mask for(int j = 0; j < mask_width; j++){ value += mask[i*mask_width + j] * shared[(i+ty)*SHARED_STRIDE + (j+tx)]; // Performs the convolution } } if(row_o < height && col_o < width){ //Check if thread indexes are into the output domain output[row_o*width + col_o] = value; } } } void pmpp__2d_convolution_host(double *input, double *output, const int width, const int height, const double *mask, const int mask_width){ int ghosts_by_side = mask_width/2; double sum; int input_row, input_col; for(int output_row = 0; output_row < height; output_row++){ // Iterates through each output position (row and col) to calculate it for(int output_col = 0; output_col < width; output_col++){ sum = 0; for(int mask_row = 0; mask_row < mask_width; mask_row++){ // Iterates through each mask position (row) to get it input_row = output_row - ghosts_by_side + mask_row; //Calculates the input row index to be accessed if(input_row >= 0 && input_row < height ){ //Checks if the row has input values or only ghosts for(int mask_col = 0; mask_col < mask_width; mask_col++){ // Iterates through each mask position (col) to get it input_col = output_col - ghosts_by_side + mask_col; //Calculates the input col index to be accessed if(input_col >= 0 && input_col < width){ //Checks if the col of the current row is a input value or a ghost cell sum += input[input_row*width + input_col]*mask[mask_row*mask_width + mask_col]; //Performs the convolution } } output[output_row*width + output_col] = sum; } } } } } void pmpp__1d_convolution_host(double *input, double *output, const int length, const double *mask, const int mask_width){ int ghosts_by_side = mask_width/2; double sum; int input_idx; for(int out_idx = 0; out_idx < length; out_idx++){ // Iterates through each output position to calculate it sum = 0; for(int mask_idx = 0; mask_idx < mask_width; mask_idx++){ // Iterates through each mask position input_idx = out_idx - ghosts_by_side + mask_idx; // Calculates the input index if(input_idx >= 0 && input_idx < length){ // Check if the input index is not a ghost sum+=input[input_idx]*mask[mask_idx]; //Performs the convolution } } output[out_idx] = sum; } } __global__ void pmpp__1d_convolution_kernel(double *input, double *output, const int length, double *mask, const int mask_width){ int tid = blockIdx.x*blockDim.x + threadIdx.x; extern __shared__ double shared[]; // Each thread loads data from global to the block shared memory 
shared[threadIdx.x] = tid < length ? input[tid] : 0.0; __syncthreads(); // Defines the data index that belongs to each tile int this_tile_start_point = blockIdx.x * blockDim.x; int next_tile_start_point = (blockIdx.x + 1) * blockDim.x; // Go back int(mask_width/2) positions in order to start from the block scope external cells (halos or ghosts placed before the this_tile_start_point position) int n_start_point = tid - (mask_width/2); double p = 0; for(int j = 0; j < mask_width; j++){ int n_index = n_start_point + j; if(n_index >= 0 && n_index < length){ //Check if the n_index not refers to a ghost cell if(n_index >= this_tile_start_point && n_index < next_tile_start_point){ // If is an internal cell (true) or a halo cell (false) p += shared[threadIdx.x + j - mask_width/2]*mask[j]; }else{ p += input[n_index] * mask[j]; //Takes the N[value] from the cache (Luckily!) or from the global memory and performs the convolution } } } output[tid] = p; }
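The 1D kernel above relies on dynamically sized extern __shared__ memory: pmpp__1d_convolution_kernel caches exactly one input element per thread, so the launch must pass blockDim.x * sizeof(double) as the third launch parameter. Note also that the kernel stores output[tid] without a length guard, so the output buffer is assumed to cover grid * block elements. Below is a host-side launch sketch under those assumptions; it takes the device buffers as already allocated and assumes pmpp__convolution.h declares the kernel.

#include <cuda_runtime.h>
#include "pmpp__convolution.h"  // assumed to declare pmpp__1d_convolution_kernel

// Usage sketch for pmpp__1d_convolution_kernel as defined above.
void run1dConvolution(double* d_in, double* d_out, int length,
                      double* d_mask, int mask_width)
{
    const int block = 256;
    const int grid  = (length + block - 1) / block;
    const size_t shmem = block * sizeof(double);  // one cached element per thread
    pmpp__1d_convolution_kernel<<<grid, block, shmem>>>(d_in, d_out, length,
                                                        d_mask, mask_width);
    cudaDeviceSynchronize();
}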
cf526ed45c8b51e16fa89c053d6a52fe4d20efb6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // *** DO NOT EDIT *** // // This test has been automatically generated by // builtins-nvtx-mma.py --ptx=71 --gpu-arch=80 // // Make sure we can handle all builtins available on sm_80 with PTX71 // RUN: %clang_cc1 -triple nvptx64-unknown-unknown -target-cpu sm_80 \ // RUN: -fcuda-is-device -target-feature +ptx71 \ // RUN: -DPTX=71 -DSM=80 \ // RUN: -S -emit-llvm -o - -x cuda %s \ // RUN: | FileCheck -check-prefixes=CHECK_PTX70_SM80,CHECK_PTX60_SM70,CHECK_PTX63_SM72,CHECK_PTX61_SM70,CHECK_PTX63_SM75,CHECK_PTX71_SM80 %s // Verify that all builtins have correct constraints. // RUN: %clang_cc1 -triple nvptx-unknown-unknown \ // RUN: -target-cpu sm_60 -target-feature +ptx42 \ // RUN: -DPTX=71 -DSM=80 -fcuda-is-device -S -o /dev/null -x cuda \ // RUN: -verify %s #if !defined(TORCH_HIP_VERSION) #define __device__ __attribute__((device)) #define __global__ __attribute__((global)) #define __shared__ __attribute__((shared)) #define __constant__ __attribute__((constant)) typedef unsigned long long uint64_t; #endif // CHECK-LABEL: test_wmma_buitins __device__ void test_wmma_buitins(int *src, int *dst, float *fsrc, float *fdst, double *dsrc, double *ddst, int ldm) { #if (PTX >= 60) && (SM >= 70) // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_a' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_a(dst, src, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_a' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_a(dst, src, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_b' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_b(dst, src, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_b' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_b(dst, src, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f16(dst, src, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f16(dst, src, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_st_c_f16(dst, src, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_st_c_f16(dst, src, ldm, 
0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target 
feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX60_SM70: call 
{{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 1); #endif // (PTX >= 60) && (SM >= 70) #if (PTX >= 61) && (SM >= 70) // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_a' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_a(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_a' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_a(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_b' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_b(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_b' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_b(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f16(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f16(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_st_c_f16(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_st_c_f16(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} 
@llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_st_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_st_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_a' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_a(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_a' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_a(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_b' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_b(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_b' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_b(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f16(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f16(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_st_c_f16(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_st_c_f16(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_st_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_st_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} 
__hmma_m32n8k16_mma_f16f16(dst, src, src, src, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32 // expected-error-re@+1 
{{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX61_SM70: 
call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} 
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32 // expected-error-re@+1 
{{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 0, 1); #endif // (PTX >= 61) && (SM >= 70) #if (PTX >= 63) && (SM >= 72) // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.s8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_a_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.s8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_a_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.u8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_a_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.u8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_a_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.s8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_b_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.s8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_b_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.u8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_b_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.u8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_b_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.s32 // expected-error-re@+1 {{'__imma_m16n16k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_c(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.s32 // expected-error-re@+1 {{'__imma_m16n16k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_c(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.s32 // expected-error-re@+1 {{'__imma_m16n16k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_st_c_i32(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.s32 // expected-error-re@+1 {{'__imma_m16n16k16_st_c_i32' 
needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_st_c_i32(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.s8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_a_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.s8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_a_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.u8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_a_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.u8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_a_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.s8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_b_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.s8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_b_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.u8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_b_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.u8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_b_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.s32 // expected-error-re@+1 {{'__imma_m32n8k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_c(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.s32 // expected-error-re@+1 {{'__imma_m32n8k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_c(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.s32 // expected-error-re@+1 {{'__imma_m32n8k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_st_c_i32(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.s32 // expected-error-re@+1 {{'__imma_m32n8k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_st_c_i32(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.s8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_a_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.s8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_a_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.u8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_a_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.u8 // expected-error-re@+1 
{{'__imma_m8n32k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_a_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.s8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_b_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.s8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_b_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.u8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_b_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.u8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_b_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.s32 // expected-error-re@+1 {{'__imma_m8n32k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_c(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.s32 // expected-error-re@+1 {{'__imma_m8n32k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_c(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.s32 // expected-error-re@+1 {{'__imma_m8n32k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_st_c_i32(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.s32 // expected-error-re@+1 {{'__imma_m8n32k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_st_c_i32(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.s8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.s8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.s8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.s8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.s8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.s8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.s8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} 
@llvm.nvvm.wmma.m16n16k16.mma.row.row.s8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 0, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.u8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.u8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.u8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.u8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.u8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.u8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.u8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.u8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 0, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.s8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.s8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.s8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.s8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.s8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.s8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.s8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs 
target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.s8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 0, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.u8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.u8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.u8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.u8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.u8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.u8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.u8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.u8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 0, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.s8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.s8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.s8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.s8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.s8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.s8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} 
@llvm.nvvm.wmma.m8n32k16.mma.row.row.s8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.s8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 0, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.u8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.u8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.u8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.u8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.u8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.u8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.u8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.u8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 0, 1); #endif // (PTX >= 63) && (SM >= 72) #if (PTX >= 63) && (SM >= 75) // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.a.row.stride.b1 // expected-error-re@+1 {{'__bmma_m8n8k128_ld_a_b1' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_ld_a_b1(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.b.col.stride.b1 // expected-error-re@+1 {{'__bmma_m8n8k128_ld_b_b1' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_ld_b_b1(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.c.col.stride.s32 // expected-error-re@+1 {{'__bmma_m8n8k128_ld_c' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_ld_c(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.c.row.stride.s32 // expected-error-re@+1 {{'__bmma_m8n8k128_ld_c' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_ld_c(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.store.d.col.stride.s32 // expected-error-re@+1 {{'__bmma_m8n8k128_st_c_i32' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_st_c_i32(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.store.d.row.stride.s32 // expected-error-re@+1 
{{'__bmma_m8n8k128_st_c_i32' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_st_c_i32(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.a.row.stride.s4 // expected-error-re@+1 {{'__imma_m8n8k32_ld_a_s4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_a_s4(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.a.row.stride.u4 // expected-error-re@+1 {{'__imma_m8n8k32_ld_a_u4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_a_u4(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.b.col.stride.s4 // expected-error-re@+1 {{'__imma_m8n8k32_ld_b_s4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_b_s4(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.b.col.stride.u4 // expected-error-re@+1 {{'__imma_m8n8k32_ld_b_u4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_b_u4(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.c.col.stride.s32 // expected-error-re@+1 {{'__imma_m8n8k32_ld_c' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_c(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.c.row.stride.s32 // expected-error-re@+1 {{'__imma_m8n8k32_ld_c' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_c(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.store.d.col.stride.s32 // expected-error-re@+1 {{'__imma_m8n8k32_st_c_i32' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_st_c_i32(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.store.d.row.stride.s32 // expected-error-re@+1 {{'__imma_m8n8k32_st_c_i32' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_st_c_i32(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.mma.xor.popc.row.col.b1 // expected-error-re@+1 {{'__bmma_m8n8k128_mma_xor_popc_b1' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_mma_xor_popc_b1(dst, src, src, src, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.s4 // expected-error-re@+1 {{'__imma_m8n8k32_mma_s4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_mma_s4(dst, src, src, src, 1, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.s4.satfinite // expected-error-re@+1 {{'__imma_m8n8k32_mma_s4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_mma_s4(dst, src, src, src, 1, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.u4 // expected-error-re@+1 {{'__imma_m8n8k32_mma_u4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_mma_u4(dst, src, src, src, 1, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.u4.satfinite // expected-error-re@+1 {{'__imma_m8n8k32_mma_u4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_mma_u4(dst, src, src, src, 1, 1); #endif // (PTX >= 63) && (SM >= 75) #if (PTX >= 70) && (SM >= 80) // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_ld_a(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_ld_a(dst, src, ldm, 0); 
// CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_ld_b(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_ld_b(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.a.col.stride.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_a(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.a.row.stride.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_a(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.b.col.stride.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_b(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.b.row.stride.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_b(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.c.col.stride.f32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_c' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_c(fdst, fsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.c.row.stride.f32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_c' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_c(fdst, fsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.store.d.col.stride.f32 // expected-error-re@+1 {{'__mma_m16n16k8_st_c_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_m16n16k8_st_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.store.d.row.stride.f32 // expected-error-re@+1 {{'__mma_m16n16k8_st_c_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_m16n16k8_st_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_ld_a(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_ld_a(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_ld_b(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_ld_b(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_ld_a(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_ld_a' needs 
target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_ld_a(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_ld_b(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_ld_b(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.a.col.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_a(ddst, dsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.a.row.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_a(ddst, dsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.b.col.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_b(ddst, dsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.b.row.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_b(ddst, dsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.c.col.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_c' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_c(ddst, dsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.c.row.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_c' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_c(ddst, dsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.store.d.col.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_st_c_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_st_c_f64(ddst, dsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.store.d.row.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_st_c_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_st_c_f64(ddst, dsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_mma_f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_mma_f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_mma_f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_mma_f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.mma.col.col.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_mma_f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.mma.col.row.tf32 // 
expected-error-re@+1 {{'__mma_tf32_m16n16k8_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_mma_f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.mma.row.col.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_mma_f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.mma.row.row.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_mma_f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_mma_f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_mma_f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_mma_f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_mma_f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_mma_f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_mma_f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_mma_f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_mma_f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.mma.col.col.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_mma_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_mma_f64(ddst, dsrc, dsrc, dsrc, 3, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.mma.col.row.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_mma_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_mma_f64(ddst, dsrc, dsrc, dsrc, 2, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.mma.row.col.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_mma_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_mma_f64(ddst, dsrc, dsrc, dsrc, 1, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.mma.row.row.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_mma_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_mma_f64(ddst, dsrc, dsrc, dsrc, 0, 0); #endif // (PTX >= 70) && (SM >= 80) #if (PTX >= 71) && (SM >= 80) // CHECK_PTX71_SM80: call {{.*}} 
@llvm.nvvm.wmma.m8n8k128.mma.and.popc.row.col.b1
  // expected-error-re@+1 {{'__bmma_m8n8k128_mma_and_popc_b1' needs target feature (sm_80{{.*}},(ptx71{{.*}}}}
  __bmma_m8n8k128_mma_and_popc_b1(dst, src, src, src, 1);
#endif // (PTX >= 71) && (SM >= 80)
}
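The HIP content above and the CUDA source below exercise the same mechanical pattern for every WMMA builtin: a FileCheck line naming the @llvm.nvvm.wmma intrinsic the builtin should lower to under the positive RUN configuration, an expected-error-re line consumed by the -verify RUN (compiled as sm_60/ptx42, where each builtin must be diagnosed as needing its sm_*/ptx* target features), and the guarded builtin call itself. A minimal sketch of that per-builtin triplet, reduced to a single builtin taken from the generated file and assuming the same RUN lines as in its header:

// Sketch only: same structure as the generated test, cut down to one builtin.
#define __device__ __attribute__((device))

// CHECK-LABEL: test_one_wmma_builtin
__device__ void test_one_wmma_builtin(int *dst, int *src, int ldm) {
#if (PTX >= 60) && (SM >= 70)
  // Positive RUN (sm_70 / ptx60 or newer): FileCheck expects the NVVM intrinsic call.
  // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.f16
  // Negative RUN (sm_60 / ptx42 with -verify): Sema must reject the builtin with this error.
  // expected-error-re@+1 {{'__hmma_m16n16k16_ld_a' needs target feature (sm_70{{.*}},(ptx60{{.*}}}}
  __hmma_m16n16k16_ld_a(dst, src, ldm, 1);
#endif
}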
cf526ed45c8b51e16fa89c053d6a52fe4d20efb6.cu
// // *** DO NOT EDIT *** // // This test has been automatically generated by // builtins-nvtx-mma.py --ptx=71 --gpu-arch=80 // // Make sure we can handle all builtins available on sm_80 with PTX71 // RUN: %clang_cc1 -triple nvptx64-unknown-unknown -target-cpu sm_80 \ // RUN: -fcuda-is-device -target-feature +ptx71 \ // RUN: -DPTX=71 -DSM=80 \ // RUN: -S -emit-llvm -o - -x cuda %s \ // RUN: | FileCheck -check-prefixes=CHECK_PTX70_SM80,CHECK_PTX60_SM70,CHECK_PTX63_SM72,CHECK_PTX61_SM70,CHECK_PTX63_SM75,CHECK_PTX71_SM80 %s // Verify that all builtins have correct constraints. // RUN: %clang_cc1 -triple nvptx-unknown-unknown \ // RUN: -target-cpu sm_60 -target-feature +ptx42 \ // RUN: -DPTX=71 -DSM=80 -fcuda-is-device -S -o /dev/null -x cuda \ // RUN: -verify %s #if !defined(CUDA_VERSION) #define __device__ __attribute__((device)) #define __global__ __attribute__((global)) #define __shared__ __attribute__((shared)) #define __constant__ __attribute__((constant)) typedef unsigned long long uint64_t; #endif // CHECK-LABEL: test_wmma_buitins __device__ void test_wmma_buitins(int *src, int *dst, float *fsrc, float *fdst, double *dsrc, double *ddst, int ldm) { #if (PTX >= 60) && (SM >= 70) // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_a' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_a(dst, src, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_a' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_a(dst, src, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_b' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_b(dst, src, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_b' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_b(dst, src, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f16(dst, src, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f16(dst, src, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_st_c_f16(dst, src, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_st_c_f16(dst, src, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f32 // 
expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 0); // 
CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32.satfinite // expected-error-re@+1 
{{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 1); #endif // (PTX >= 60) && (SM >= 70) #if (PTX >= 61) && (SM >= 70) // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_a' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_a(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_a' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_a(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_b' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_b(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_b' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_b(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f16(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f16(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_st_c_f16(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_st_c_f16(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f32 // expected-error-re@+1 
{{'__hmma_m32n8k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_st_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_st_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_a' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_a(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_a' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_a(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_b' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_b(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_b' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_b(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f16(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f16(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_st_c_f16(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_st_c_f16(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_st_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_st_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 3, 1); // CHECK_PTX61_SM70: call 
{{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} 
__hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32.satfinite // 
expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 1, 1); // 
CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature 
(sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 0, 1); #endif // (PTX >= 61) && (SM >= 70) #if (PTX >= 63) && (SM >= 72) // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.s8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_a_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.s8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_a_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.u8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_a_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.u8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_a_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.s8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_b_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.s8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_b_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.u8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_b_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.u8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_b_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.s32 // expected-error-re@+1 {{'__imma_m16n16k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_c(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.s32 // expected-error-re@+1 {{'__imma_m16n16k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_c(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.s32 // expected-error-re@+1 {{'__imma_m16n16k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_st_c_i32(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.s32 // expected-error-re@+1 {{'__imma_m16n16k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} 
__imma_m16n16k16_st_c_i32(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.s8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_a_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.s8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_a_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.u8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_a_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.u8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_a_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.s8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_b_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.s8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_b_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.u8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_b_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.u8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_b_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.s32 // expected-error-re@+1 {{'__imma_m32n8k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_c(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.s32 // expected-error-re@+1 {{'__imma_m32n8k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_c(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.s32 // expected-error-re@+1 {{'__imma_m32n8k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_st_c_i32(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.s32 // expected-error-re@+1 {{'__imma_m32n8k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_st_c_i32(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.s8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_a_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.s8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_a_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.u8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_a_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.u8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_a_u8' needs target feature 
(sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_a_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.s8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_b_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.s8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_b_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.u8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_b_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.u8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_b_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.s32 // expected-error-re@+1 {{'__imma_m8n32k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_c(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.s32 // expected-error-re@+1 {{'__imma_m8n32k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_c(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.s32 // expected-error-re@+1 {{'__imma_m8n32k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_st_c_i32(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.s32 // expected-error-re@+1 {{'__imma_m8n32k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_st_c_i32(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.s8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.s8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.s8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.s8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.s8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.s8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.s8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.s8.satfinite // 
expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 0, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.u8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.u8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.u8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.u8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.u8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.u8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.u8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.u8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 0, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.s8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.s8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.s8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.s8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.s8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.s8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.s8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} 
__imma_m32n8k16_mma_s8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.s8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 0, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.u8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.u8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.u8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.u8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.u8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.u8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.u8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.u8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 0, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.s8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.s8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.s8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.s8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.s8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.s8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.s8 // 
expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.s8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 0, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.u8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.u8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.u8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.u8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.u8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.u8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.u8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.u8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 0, 1); #endif // (PTX >= 63) && (SM >= 72) #if (PTX >= 63) && (SM >= 75) // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.a.row.stride.b1 // expected-error-re@+1 {{'__bmma_m8n8k128_ld_a_b1' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_ld_a_b1(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.b.col.stride.b1 // expected-error-re@+1 {{'__bmma_m8n8k128_ld_b_b1' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_ld_b_b1(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.c.col.stride.s32 // expected-error-re@+1 {{'__bmma_m8n8k128_ld_c' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_ld_c(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.c.row.stride.s32 // expected-error-re@+1 {{'__bmma_m8n8k128_ld_c' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_ld_c(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.store.d.col.stride.s32 // expected-error-re@+1 {{'__bmma_m8n8k128_st_c_i32' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_st_c_i32(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.store.d.row.stride.s32 // expected-error-re@+1 {{'__bmma_m8n8k128_st_c_i32' needs target feature 
(sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_st_c_i32(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.a.row.stride.s4 // expected-error-re@+1 {{'__imma_m8n8k32_ld_a_s4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_a_s4(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.a.row.stride.u4 // expected-error-re@+1 {{'__imma_m8n8k32_ld_a_u4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_a_u4(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.b.col.stride.s4 // expected-error-re@+1 {{'__imma_m8n8k32_ld_b_s4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_b_s4(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.b.col.stride.u4 // expected-error-re@+1 {{'__imma_m8n8k32_ld_b_u4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_b_u4(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.c.col.stride.s32 // expected-error-re@+1 {{'__imma_m8n8k32_ld_c' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_c(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.c.row.stride.s32 // expected-error-re@+1 {{'__imma_m8n8k32_ld_c' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_c(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.store.d.col.stride.s32 // expected-error-re@+1 {{'__imma_m8n8k32_st_c_i32' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_st_c_i32(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.store.d.row.stride.s32 // expected-error-re@+1 {{'__imma_m8n8k32_st_c_i32' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_st_c_i32(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.mma.xor.popc.row.col.b1 // expected-error-re@+1 {{'__bmma_m8n8k128_mma_xor_popc_b1' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_mma_xor_popc_b1(dst, src, src, src, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.s4 // expected-error-re@+1 {{'__imma_m8n8k32_mma_s4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_mma_s4(dst, src, src, src, 1, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.s4.satfinite // expected-error-re@+1 {{'__imma_m8n8k32_mma_s4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_mma_s4(dst, src, src, src, 1, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.u4 // expected-error-re@+1 {{'__imma_m8n8k32_mma_u4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_mma_u4(dst, src, src, src, 1, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.u4.satfinite // expected-error-re@+1 {{'__imma_m8n8k32_mma_u4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_mma_u4(dst, src, src, src, 1, 1); #endif // (PTX >= 63) && (SM >= 75) #if (PTX >= 70) && (SM >= 80) // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_ld_a(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_ld_a(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} 
@llvm.nvvm.wmma.m16n16k16.load.b.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_ld_b(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_ld_b(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.a.col.stride.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_a(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.a.row.stride.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_a(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.b.col.stride.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_b(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.b.row.stride.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_b(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.c.col.stride.f32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_c' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_c(fdst, fsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.c.row.stride.f32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_c' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_c(fdst, fsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.store.d.col.stride.f32 // expected-error-re@+1 {{'__mma_m16n16k8_st_c_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_m16n16k8_st_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.store.d.row.stride.f32 // expected-error-re@+1 {{'__mma_m16n16k8_st_c_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_m16n16k8_st_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_ld_a(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_ld_a(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_ld_b(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_ld_b(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_ld_a(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_ld_a' needs target feature 
(sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_ld_a(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_ld_b(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_ld_b(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.a.col.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_a(ddst, dsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.a.row.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_a(ddst, dsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.b.col.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_b(ddst, dsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.b.row.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_b(ddst, dsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.c.col.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_c' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_c(ddst, dsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.c.row.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_c' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_c(ddst, dsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.store.d.col.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_st_c_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_st_c_f64(ddst, dsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.store.d.row.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_st_c_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_st_c_f64(ddst, dsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_mma_f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_mma_f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_mma_f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_mma_f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.mma.col.col.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_mma_f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.mma.col.row.tf32 // expected-error-re@+1 
{{'__mma_tf32_m16n16k8_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_mma_f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.mma.row.col.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_mma_f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.mma.row.row.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_mma_f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_mma_f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_mma_f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_mma_f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_mma_f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_mma_f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_mma_f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_mma_f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_mma_f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.mma.col.col.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_mma_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_mma_f64(ddst, dsrc, dsrc, dsrc, 3, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.mma.col.row.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_mma_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_mma_f64(ddst, dsrc, dsrc, dsrc, 2, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.mma.row.col.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_mma_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_mma_f64(ddst, dsrc, dsrc, dsrc, 1, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.mma.row.row.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_mma_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_mma_f64(ddst, dsrc, dsrc, dsrc, 0, 0); #endif // (PTX >= 70) && (SM >= 80) #if (PTX >= 71) && (SM >= 80) // CHECK_PTX71_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k128.mma.and.popc.row.col.b1 // 
expected-error-re@+1 {{'__bmma_m8n8k128_mma_and_popc_b1' needs target feature (sm_80{{.*}},(ptx71{{.*}}}} __bmma_m8n8k128_mma_and_popc_b1(dst, src, src, src, 1); #endif // (PTX >= 71) && (SM >= 80) }
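// Illustrative sketch (not part of the generated test above; the helper name is
// hypothetical): the two trailing integer arguments in the builtin calls select the
// @llvm.nvvm.wmma intrinsic variant that the CHECK lines expect. The layout argument
// maps 3/2/1/0 to col.col/col.row/row.col/row.row, and a final argument of 1 appends
// ".satfinite". The tiny host program below only restates that mapping.
#include <cstdio>

static const char* wmma_layout_suffix(int layout) {
  switch (layout) {
    case 3: return "col.col";
    case 2: return "col.row";
    case 1: return "row.col";
    case 0: return "row.row";
    default: return "invalid";
  }
}

int main() {
  // Mirrors __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 0)
  //   -> @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32
  std::printf("@llvm.nvvm.wmma.m16n16k16.mma.%s.f32.f32\n", wmma_layout_suffix(1));
  // Mirrors __imma_m16n16k16_mma_s8(dst, src, src, src, 3, 1)
  //   -> @llvm.nvvm.wmma.m16n16k16.mma.col.col.s8.satfinite
  std::printf("@llvm.nvvm.wmma.m16n16k16.mma.%s.s8.satfinite\n", wmma_layout_suffix(3));
  return 0;
}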
ba5011b31084fe3d96e26ea6496a0c2de023312f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <text/subword/detail/data_normalizer.hpp> #include <text/subword/detail/tokenizer_utils.cuh> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/strings/detail/utilities.cuh> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/pair.h> #include <thrust/remove.h> #include <thrust/scan.h> #include <thrust/transform.h> namespace nvtext { namespace detail { namespace { /** * @brief Bit used to filter out invalid code points. * * When normalizing characters to code point values, if this bit is set, * the code point should be filtered out before returning from the normalizer. */ constexpr uint32_t FILTER_BIT = 22; /** * @brief Retrieve new code point from metadata value. * * @param metadata Value from the codepoint_metadata table. * @return The replacement character if appropriate. */ __device__ uint32_t get_first_cp(uint32_t metadata) { return metadata & NEW_CP_MASK; } /** * @brief Retrieve token category from the metadata value. * * Category values are 0-5: * 0 - character should be padded * 1 - pad character if lower-case * 2 - character should be removed * 3 - remove character if lower-case * 4 - whitespace character -- always replace * 5 - uncategorized * * @param metadata Value from the codepoint_metadata table. * @return Category value. */ __device__ uint32_t extract_token_cat(uint32_t metadata) { return (metadata >> TOKEN_CAT_SHIFT) & TOKEN_CAT_MASK; } /** * @brief Return true if category of metadata value specifies the character should be replaced. */ __device__ bool should_remove_cp(uint32_t metadata, bool lower_case) { auto const cat = extract_token_cat(metadata); return (cat == TOKEN_CAT_REMOVE_CHAR) || (lower_case && (cat == TOKEN_CAT_REMOVE_CHAR_IF_LOWER)); } /** * @brief Return true if category of metadata value specifies the character should be padded. */ __device__ bool should_add_spaces(uint32_t metadata, bool lower_case) { auto const cat = extract_token_cat(metadata); return (cat == TOKEN_CAT_ADD_SPACE) || (lower_case && (cat == TOKEN_CAT_ADD_SPACE_IF_LOWER)); } /** * @brief Return true if category of metadata value specifies the character should be replaced. */ __device__ bool always_replace(uint32_t metadata) { return extract_token_cat(metadata) == TOKEN_CAT_ALWAYS_REPLACE; } /** * @brief Returns true if metadata value includes a multi-character transform bit equal to 1. */ __device__ bool is_multi_char_transform(uint32_t metadata) { return (metadata >> MULTICHAR_SHIFT) & MULTICHAR_MASK; } /** * @brief Returns true if the byte passed in could be a valid head byte for * a utf8 character. 
That is, not binary `10xxxxxx` */ __device__ bool is_head_byte(unsigned char utf8_byte) { return (utf8_byte >> 6) != 2; } /** * @brief Converts a UTF-8 character into a unicode code point value. * * If the byte at start_byte_for_thread is the first byte of a UTF-8 character (head byte), * the UTF-8 character is converted to a unicode code point and returned. * * If the byte at start_byte_for_thread is not a head byte, 0 is returned. * * All threads start reading bytes from the pointer denoted by strings. * * @param strings A pointer to the start of the sequence of characters to be analyzed. * @param start_byte_for_thread Which byte to start analyzing * @return New code point value for this byte. */ __device__ uint32_t extract_code_points_from_utf8(unsigned char const* strings, size_t const total_bytes, uint32_t const start_byte_for_thread) { constexpr uint8_t max_utf8_blocks_for_char = 4; uint8_t utf8_blocks[max_utf8_blocks_for_char] = {0}; for (int i = 0; i < ::min(static_cast<size_t>(max_utf8_blocks_for_char), total_bytes - start_byte_for_thread); ++i) { utf8_blocks[i] = strings[start_byte_for_thread + i]; } const uint8_t length_encoding_bits = utf8_blocks[0] >> 3; // UTF-8 format is variable-width character encoding using up to 4 bytes. // If the first byte is: // - [x00-x7F] -- beginning of a 1-byte character (ASCII) // - [xC0-xDF] -- beginning of a 2-byte character // - [xE0-xEF] -- beginning of a 3-byte character // - [xF0-xF7] -- beginning of a 4-byte character // Anything else is an intermediate byte [x80-xBF]. // So shifted by 3 bits this becomes // - [x00-x0F] or leb < 16 // - [x18-x1B] or 24 <= leb <= 27 // - [x1C-x1D] or 28 <= leb <= 29 // - [x1E-x1F] or leb >= 30 // The remaining bits are part of the value as specified by the mask // specified by x's below. // - b0xxxxxxx = x7F // - b110xxxxx = x1F // - b1110xxxx = x0F // - b11110xxx = x07 using encoding_length_pair = thrust::pair<uint8_t, uint8_t>; // Set the number of characters and the top masks based on the length encoding bits. encoding_length_pair const char_encoding_length = [length_encoding_bits] { if (length_encoding_bits < 16) return encoding_length_pair{1, 0x7F}; if (length_encoding_bits >= 24 && length_encoding_bits <= 27) return encoding_length_pair{2, 0x1F}; if (length_encoding_bits == 28 || length_encoding_bits == 29) return encoding_length_pair{3, 0x0F}; if (length_encoding_bits == 30) return encoding_length_pair{4, 0x07}; return encoding_length_pair{0, 0}; }(); // Now pack up the bits into a uint32_t. // Move the first set of values into bits 19-24 in the 32-bit value. uint32_t code_point = (utf8_blocks[0] & char_encoding_length.second) << 18; // Move the remaining values which are 6 bits (mask b10xxxxxx = x3F) // from the remaining bytes into successive positions in the 32-bit result. code_point |= ((utf8_blocks[1] & 0x3F) << 12); code_point |= ((utf8_blocks[2] & 0x3F) << 6); code_point |= utf8_blocks[3] & 0x3F; // Adjust the final result by shifting by the character length. uint8_t const shift_amt = 24 - 6 * char_encoding_length.first; code_point >>= shift_amt; return code_point; } /** * @brief Normalize the characters for the strings input. * * Characters are replaced, padded, or removed depending on the `do_lower_case` input * as well as the metadata values for each code point found in `cp_metadata`. * * First, each character is converted from UTF-8 to a unicode code point value. * This value is then looked up in the `cp_metadata` table to determine its fate.
* The end result is a set of code point values for each character. * The normalized set of characters make it easier for the tokenizer to identify * tokens and match up token ids. * * @param[in] strings The input strings with characters to normalize to code point values. * @param[in] total_bytes Total number of bytes in the input `strings` vector. * @param[in] cp_metadata The metadata lookup table for every unicode code point value. * @param[in] aux_table Aux table for mapping some multi-byte code point values. * @param[in] do_lower_case True if normalization should include lower-casing. * @param[out] code_points The resulting code point values from normalization. * @param[out] chars_per_thread Output number of code point values per string. */ __global__ void kernel_data_normalizer(unsigned char const* strings, size_t const total_bytes, uint32_t const* cp_metadata, uint64_t const* aux_table, bool const do_lower_case, uint32_t* code_points, uint32_t* chars_per_thread) { constexpr uint32_t init_val = (1 << FILTER_BIT); uint32_t replacement_code_points[MAX_NEW_CHARS] = {init_val, init_val, init_val}; uint32_t const char_for_thread = blockDim.x * blockIdx.x + threadIdx.x; uint32_t num_new_chars = 0; if (char_for_thread < total_bytes) { auto const code_point = extract_code_points_from_utf8(strings, total_bytes, char_for_thread); auto const metadata = cp_metadata[code_point]; if (is_head_byte(strings[char_for_thread]) && !should_remove_cp(metadata, do_lower_case)) { num_new_chars = 1; // Apply lower cases and accent stripping if necessary auto const new_cp = do_lower_case || always_replace(metadata) ? get_first_cp(metadata) : code_point; replacement_code_points[0] = new_cp == 0 ? code_point : new_cp; if (do_lower_case && is_multi_char_transform(metadata)) { auto const next_cps = aux_table[code_point]; replacement_code_points[1] = static_cast<uint32_t>(next_cps >> 32); auto const potential_next_cp = static_cast<uint32_t>(next_cps); replacement_code_points[2] = potential_next_cp != 0 ? potential_next_cp : replacement_code_points[2]; num_new_chars = 2 + (potential_next_cp != 0); } if (should_add_spaces(metadata, do_lower_case)) { // Need to shift all existing code-points up one // This is a rotate right. There is no thrust equivalent at this time. for (int loc = num_new_chars; loc > 0; --loc) { replacement_code_points[loc] = replacement_code_points[loc - 1]; } // Write the required spaces at the end replacement_code_points[0] = SPACE_CODE_POINT; replacement_code_points[num_new_chars + 1] = SPACE_CODE_POINT; num_new_chars += 2; } } } chars_per_thread[char_for_thread] = num_new_chars; using BlockStore = cub::BlockStore<uint32_t, THREADS_PER_BLOCK, MAX_NEW_CHARS, cub::BLOCK_STORE_WARP_TRANSPOSE>; __shared__ typename BlockStore::TempStorage temp_storage; // Now we perform coalesced writes back to global memory using cub. 
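// Each thread contributes MAX_NEW_CHARS slots, so block blockIdx.x owns the
// contiguous slice of code_points starting at blockIdx.x * blockDim.x * MAX_NEW_CHARS,
// which is the base pointer computed next.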
uint32_t* block_base = code_points + blockIdx.x * blockDim.x * MAX_NEW_CHARS; BlockStore(temp_storage).Store(block_base, replacement_code_points); } } // namespace data_normalizer::data_normalizer(codepoint_metadata_type const* cp_metadata, aux_codepoint_data_type const* aux_table, bool do_lower_case) : d_cp_metadata{cp_metadata}, d_aux_table{aux_table}, do_lower_case{do_lower_case} { } uvector_pair data_normalizer::normalize(char const* d_strings, uint32_t const* d_offsets, uint32_t num_strings, rmm::cuda_stream_view stream) const { if (num_strings == 0) return std::pair(std::make_unique<rmm::device_uvector<uint32_t>>(0, stream), std::make_unique<rmm::device_uvector<uint32_t>>(0, stream)); // copy offsets to working memory size_t const num_offsets = num_strings + 1; auto d_strings_offsets = std::make_unique<rmm::device_uvector<uint32_t>>(num_offsets, stream); thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator<uint32_t>(0), thrust::make_counting_iterator<uint32_t>(num_offsets), d_strings_offsets->begin(), [d_offsets] __device__(auto idx) { auto const offset = d_offsets[0]; // adjust for any offset to the offsets return d_offsets[idx] - offset; }); uint32_t const bytes_count = d_strings_offsets->element(num_strings, stream); if (bytes_count == 0) // if no bytes, nothing to do return std::pair(std::make_unique<rmm::device_uvector<uint32_t>>(0, stream), std::make_unique<rmm::device_uvector<uint32_t>>(0, stream)); cudf::detail::grid_1d const grid{static_cast<cudf::size_type>(bytes_count), THREADS_PER_BLOCK, 1}; size_t const threads_on_device = grid.num_threads_per_block * grid.num_blocks; size_t const max_new_char_total = MAX_NEW_CHARS * threads_on_device; auto d_code_points = std::make_unique<rmm::device_uvector<uint32_t>>(max_new_char_total, stream); rmm::device_uvector<uint32_t> d_chars_per_thread(threads_on_device, stream); hipLaunchKernelGGL(( kernel_data_normalizer), dim3(grid.num_blocks), dim3(grid.num_threads_per_block), 0, stream.value(), reinterpret_cast<const unsigned char*>(d_strings), bytes_count, d_cp_metadata, d_aux_table, do_lower_case, d_code_points->data(), d_chars_per_thread.data()); // Remove the 'empty' code points from the vector thrust::remove(rmm::exec_policy(stream), d_code_points->begin(), d_code_points->end(), uint32_t{1 << FILTER_BIT}); // We also need to prefix sum the number of characters up to an including // the current character in order to get the new strings lengths. thrust::inclusive_scan(rmm::exec_policy(stream), d_chars_per_thread.begin(), d_chars_per_thread.end(), d_chars_per_thread.begin()); // This will reset the offsets to the new generated code point values thrust::for_each_n( rmm::exec_policy(stream), thrust::make_counting_iterator<uint32_t>(1), num_strings, update_strings_lengths_fn{d_chars_per_thread.data(), d_strings_offsets->data()}); uint32_t const num_chars = d_strings_offsets->element(num_strings, stream); d_code_points->resize(num_chars, stream); // should be smaller than original allocated size // return the normalized code points and the new offsets return uvector_pair(std::move(d_code_points), std::move(d_strings_offsets)); } } // namespace detail } // namespace nvtext
ba5011b31084fe3d96e26ea6496a0c2de023312f.cu
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <text/subword/detail/data_normalizer.hpp> #include <text/subword/detail/tokenizer_utils.cuh> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/strings/detail/utilities.cuh> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/pair.h> #include <thrust/remove.h> #include <thrust/scan.h> #include <thrust/transform.h> namespace nvtext { namespace detail { namespace { /** * @brief Bit used to filter out invalid code points. * * When normalizing characters to code point values, if this bit is set, * the code point should be filtered out before returning from the normalizer. */ constexpr uint32_t FILTER_BIT = 22; /** * @brief Retrieve new code point from metadata value. * * @param metadata Value from the codepoint_metadata table. * @return The replacement character if appropriate. */ __device__ uint32_t get_first_cp(uint32_t metadata) { return metadata & NEW_CP_MASK; } /** * @brief Retrieve token category from the metadata value. * * Category values are 0-5: * 0 - character should be padded * 1 - pad character if lower-case * 2 - character should be removed * 3 - remove character if lower-case * 4 - whitespace character -- always replace * 5 - uncategorized * * @param metadata Value from the codepoint_metadata table. * @return Category value. */ __device__ uint32_t extract_token_cat(uint32_t metadata) { return (metadata >> TOKEN_CAT_SHIFT) & TOKEN_CAT_MASK; } /** * @brief Return true if category of metadata value specifies the character should be replaced. */ __device__ bool should_remove_cp(uint32_t metadata, bool lower_case) { auto const cat = extract_token_cat(metadata); return (cat == TOKEN_CAT_REMOVE_CHAR) || (lower_case && (cat == TOKEN_CAT_REMOVE_CHAR_IF_LOWER)); } /** * @brief Return true if category of metadata value specifies the character should be padded. */ __device__ bool should_add_spaces(uint32_t metadata, bool lower_case) { auto const cat = extract_token_cat(metadata); return (cat == TOKEN_CAT_ADD_SPACE) || (lower_case && (cat == TOKEN_CAT_ADD_SPACE_IF_LOWER)); } /** * @brief Return true if category of metadata value specifies the character should be replaced. */ __device__ bool always_replace(uint32_t metadata) { return extract_token_cat(metadata) == TOKEN_CAT_ALWAYS_REPLACE; } /** * @brief Returns true if metadata value includes a multi-character transform bit equal to 1. */ __device__ bool is_multi_char_transform(uint32_t metadata) { return (metadata >> MULTICHAR_SHIFT) & MULTICHAR_MASK; } /** * @brief Returns true if the byte passed in could be a valid head byte for * a utf8 character. That is, not binary `10xxxxxx` */ __device__ bool is_head_byte(unsigned char utf8_byte) { return (utf8_byte >> 6) != 2; } /** * @brief Converts a UTF-8 character into a unicode code point value. 
* * If the byte at start_byte_for_thread is the first byte of a UTF-8 character (head byte), * the UTF-8 character is converted to a unicode code point and returned. * * If the byte at start_byte_for_thread is not a head byte, 0 is returned. * * All threads start reading bytes from the pointer denoted by strings. * * @param strings A pointer to the start of the sequence of characters to be analyzed. * @param start_byte_for_thread Which byte to start analyzing * @return New code point value for this byte. */ __device__ uint32_t extract_code_points_from_utf8(unsigned char const* strings, size_t const total_bytes, uint32_t const start_byte_for_thread) { constexpr uint8_t max_utf8_blocks_for_char = 4; uint8_t utf8_blocks[max_utf8_blocks_for_char] = {0}; for (int i = 0; i < std::min(static_cast<size_t>(max_utf8_blocks_for_char), total_bytes - start_byte_for_thread); ++i) { utf8_blocks[i] = strings[start_byte_for_thread + i]; } const uint8_t length_encoding_bits = utf8_blocks[0] >> 3; // UTF-8 format is variable-width character encoding using up to 4 bytes. // If the first byte is: // - [x00-x7F] -- beginning of a 1-byte character (ASCII) // - [xC0-xDF] -- beginning of a 2-byte character // - [xE0-xEF] -- beginning of a 3-byte character // - [xF0-xF7] -- beginning of a 3-byte character // Anything else is an intermediate byte [x80-xBF]. // So shifted by 3 bits this becomes // - [x00-x0F] or leb < 16 // - [x18-x1B] or 24 <= leb <= 27 // - [x1C-x1D] or 28 <= leb <= 29 // - [x1E-x1F] or leb >= 30 // The remaining bits are part of the value as specified by the mask // specified by x's below. // - b0xxxxxxx = x7F // - b110xxxxx = x1F // - b1110xxxx = x0F // - b11110xxx = x07 using encoding_length_pair = thrust::pair<uint8_t, uint8_t>; // Set the number of characters and the top masks based on the length encoding bits. encoding_length_pair const char_encoding_length = [length_encoding_bits] { if (length_encoding_bits < 16) return encoding_length_pair{1, 0x7F}; if (length_encoding_bits >= 24 && length_encoding_bits <= 27) return encoding_length_pair{2, 0x1F}; if (length_encoding_bits == 28 || length_encoding_bits == 29) return encoding_length_pair{3, 0x0F}; if (length_encoding_bits == 30) return encoding_length_pair{4, 0x07}; return encoding_length_pair{0, 0}; }(); // Now pack up the bits into a uint32_t. // Move the first set of values into bits 19-24 in the 32-bit value. uint32_t code_point = (utf8_blocks[0] & char_encoding_length.second) << 18; // Move the remaining values which are 6 bits (mask b10xxxxxx = x3F) // from the remaining bytes into successive positions in the 32-bit result. code_point |= ((utf8_blocks[1] & 0x3F) << 12); code_point |= ((utf8_blocks[2] & 0x3F) << 6); code_point |= utf8_blocks[3] & 0x3F; // Adjust the final result by shifting by the character length. uint8_t const shift_amt = 24 - 6 * char_encoding_length.first; code_point >>= shift_amt; return code_point; } /** * @brief Normalize the characters for the strings input. * * Characters are replaced, padded, or removed depending on the `do_lower_case` input * as well as the metadata values for each code point found in `cp_metadata`. * * First, each character is converted from UTF-8 to a unicode code point value. * This value is then looked up in the `cp_metadata` table to determine its fate. * The end result is a set of code point values for each character. * The normalized set of characters make it easier for the tokenizer to identify * tokens and match up token ids. 
* * @param[in] strings The input strings with characters to normalize to code point values. * @param[in] total_bytes Total number of bytes in the input `strings` vector. * @param[in] cp_metadata The metadata lookup table for every unicode code point value. * @param[in] aux_table Aux table for mapping some multi-byte code point values. * @param[in] do_lower_case True if normalization should include lower-casing. * @param[out] code_points The resulting code point values from normalization. * @param[out] chars_per_thread Output number of code point values per string. */ __global__ void kernel_data_normalizer(unsigned char const* strings, size_t const total_bytes, uint32_t const* cp_metadata, uint64_t const* aux_table, bool const do_lower_case, uint32_t* code_points, uint32_t* chars_per_thread) { constexpr uint32_t init_val = (1 << FILTER_BIT); uint32_t replacement_code_points[MAX_NEW_CHARS] = {init_val, init_val, init_val}; uint32_t const char_for_thread = blockDim.x * blockIdx.x + threadIdx.x; uint32_t num_new_chars = 0; if (char_for_thread < total_bytes) { auto const code_point = extract_code_points_from_utf8(strings, total_bytes, char_for_thread); auto const metadata = cp_metadata[code_point]; if (is_head_byte(strings[char_for_thread]) && !should_remove_cp(metadata, do_lower_case)) { num_new_chars = 1; // Apply lower cases and accent stripping if necessary auto const new_cp = do_lower_case || always_replace(metadata) ? get_first_cp(metadata) : code_point; replacement_code_points[0] = new_cp == 0 ? code_point : new_cp; if (do_lower_case && is_multi_char_transform(metadata)) { auto const next_cps = aux_table[code_point]; replacement_code_points[1] = static_cast<uint32_t>(next_cps >> 32); auto const potential_next_cp = static_cast<uint32_t>(next_cps); replacement_code_points[2] = potential_next_cp != 0 ? potential_next_cp : replacement_code_points[2]; num_new_chars = 2 + (potential_next_cp != 0); } if (should_add_spaces(metadata, do_lower_case)) { // Need to shift all existing code-points up one // This is a rotate right. There is no thrust equivalent at this time. for (int loc = num_new_chars; loc > 0; --loc) { replacement_code_points[loc] = replacement_code_points[loc - 1]; } // Write the required spaces at the end replacement_code_points[0] = SPACE_CODE_POINT; replacement_code_points[num_new_chars + 1] = SPACE_CODE_POINT; num_new_chars += 2; } } } chars_per_thread[char_for_thread] = num_new_chars; using BlockStore = cub::BlockStore<uint32_t, THREADS_PER_BLOCK, MAX_NEW_CHARS, cub::BLOCK_STORE_WARP_TRANSPOSE>; __shared__ typename BlockStore::TempStorage temp_storage; // Now we perform coalesced writes back to global memory using cub. 
uint32_t* block_base = code_points + blockIdx.x * blockDim.x * MAX_NEW_CHARS; BlockStore(temp_storage).Store(block_base, replacement_code_points); } } // namespace data_normalizer::data_normalizer(codepoint_metadata_type const* cp_metadata, aux_codepoint_data_type const* aux_table, bool do_lower_case) : d_cp_metadata{cp_metadata}, d_aux_table{aux_table}, do_lower_case{do_lower_case} { } uvector_pair data_normalizer::normalize(char const* d_strings, uint32_t const* d_offsets, uint32_t num_strings, rmm::cuda_stream_view stream) const { if (num_strings == 0) return std::pair(std::make_unique<rmm::device_uvector<uint32_t>>(0, stream), std::make_unique<rmm::device_uvector<uint32_t>>(0, stream)); // copy offsets to working memory size_t const num_offsets = num_strings + 1; auto d_strings_offsets = std::make_unique<rmm::device_uvector<uint32_t>>(num_offsets, stream); thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator<uint32_t>(0), thrust::make_counting_iterator<uint32_t>(num_offsets), d_strings_offsets->begin(), [d_offsets] __device__(auto idx) { auto const offset = d_offsets[0]; // adjust for any offset to the offsets return d_offsets[idx] - offset; }); uint32_t const bytes_count = d_strings_offsets->element(num_strings, stream); if (bytes_count == 0) // if no bytes, nothing to do return std::pair(std::make_unique<rmm::device_uvector<uint32_t>>(0, stream), std::make_unique<rmm::device_uvector<uint32_t>>(0, stream)); cudf::detail::grid_1d const grid{static_cast<cudf::size_type>(bytes_count), THREADS_PER_BLOCK, 1}; size_t const threads_on_device = grid.num_threads_per_block * grid.num_blocks; size_t const max_new_char_total = MAX_NEW_CHARS * threads_on_device; auto d_code_points = std::make_unique<rmm::device_uvector<uint32_t>>(max_new_char_total, stream); rmm::device_uvector<uint32_t> d_chars_per_thread(threads_on_device, stream); kernel_data_normalizer<<<grid.num_blocks, grid.num_threads_per_block, 0, stream.value()>>>( reinterpret_cast<const unsigned char*>(d_strings), bytes_count, d_cp_metadata, d_aux_table, do_lower_case, d_code_points->data(), d_chars_per_thread.data()); // Remove the 'empty' code points from the vector thrust::remove(rmm::exec_policy(stream), d_code_points->begin(), d_code_points->end(), uint32_t{1 << FILTER_BIT}); // We also need to prefix sum the number of characters up to an including // the current character in order to get the new strings lengths. thrust::inclusive_scan(rmm::exec_policy(stream), d_chars_per_thread.begin(), d_chars_per_thread.end(), d_chars_per_thread.begin()); // This will reset the offsets to the new generated code point values thrust::for_each_n( rmm::exec_policy(stream), thrust::make_counting_iterator<uint32_t>(1), num_strings, update_strings_lengths_fn{d_chars_per_thread.data(), d_strings_offsets->data()}); uint32_t const num_chars = d_strings_offsets->element(num_strings, stream); d_code_points->resize(num_chars, stream); // should be smaller than original allocated size // return the normalized code points and the new offsets return uvector_pair(std::move(d_code_points), std::move(d_strings_offsets)); } } // namespace detail } // namespace nvtext
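A minimal, host-only sketch of the UTF-8-to-code-point packing used by extract_code_points_from_utf8 above. It is illustrative only and not part of cudf/nvtext; the helper name decode_utf8_cp and the sample characters are assumptions made for this check, but the shift-by-3 length classification and the 18/12/6-bit packing mirror the device code, so it can be compiled standalone (nvcc or any C++ compiler) to sanity-check the bit arithmetic described in the comments.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Decode the UTF-8 character starting at s[0] (assumed to be a head byte),
// using the same shift-by-3 length classification and 18/12/6-bit packing
// as the device function above.
static uint32_t decode_utf8_cp(unsigned char const* s, int len)
{
    unsigned char b[4] = {0, 0, 0, 0};
    for (int i = 0; i < 4 && i < len; ++i) b[i] = s[i];

    uint8_t const leb = b[0] >> 3;  // length-encoding bits
    int nbytes    = 0;
    uint32_t mask = 0;
    if (leb < 16)                    { nbytes = 1; mask = 0x7F; }  // 1-byte (ASCII)
    else if (leb >= 24 && leb <= 27) { nbytes = 2; mask = 0x1F; }  // 2-byte
    else if (leb == 28 || leb == 29) { nbytes = 3; mask = 0x0F; }  // 3-byte
    else if (leb == 30)              { nbytes = 4; mask = 0x07; }  // 4-byte

    uint32_t cp = (b[0] & mask) << 18;  // head-byte payload into bits 19-24
    cp |= (b[1] & 0x3F) << 12;          // continuation bytes contribute 6 bits each
    cp |= (b[2] & 0x3F) << 6;
    cp |= (b[3] & 0x3F);
    return cp >> (24 - 6 * nbytes);     // drop the positions unused by short characters
}

int main()
{
    unsigned char const ascii[]   = {0x41};                   // 'A' -> U+0041
    unsigned char const e_acute[] = {0xC3, 0xA9};             // 'é' -> U+00E9
    unsigned char const euro[]    = {0xE2, 0x82, 0xAC};       // '€' -> U+20AC
    unsigned char const g_clef[]  = {0xF0, 0x9D, 0x84, 0x9E}; // U+1D11E (musical symbol)

    assert(decode_utf8_cp(ascii, 1)   == 0x0041);
    assert(decode_utf8_cp(e_acute, 2) == 0x00E9);
    assert(decode_utf8_cp(euro, 3)    == 0x20AC);
    assert(decode_utf8_cp(g_clef, 4)  == 0x1D11E);
    std::printf("all code points decoded as expected\n");
    return 0;
}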
5efcd7bb10eb1a1994e7220afb8b7a857de61f35.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> s d c @author Stan Tomov @author Mathieu Faverge @author Ichitaro Yamazaki @author Mark Gates */ #include "magma_internal.h" // MAX_PIVOTS is maximum number of pivots to apply in each kernel launch // NTHREADS is number of threads in a block // 64 and 256 are better on Kepler; //#define MAX_PIVOTS 64 //#define NTHREADS 256 #define MAX_PIVOTS 32 #define NTHREADS 64 typedef struct { int npivots; int ipiv[MAX_PIVOTS]; } zlaswp_params_t; // Matrix A is stored row-wise in dAT. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void zlaswp_kernel( int n, magmaDoubleComplex *dAT, int ldda, zlaswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; magmaDoubleComplex *A1 = dAT; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaDoubleComplex *A2 = dAT + i2*ldda; magmaDoubleComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= ZLASWP performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT COMPLEX*16 array on GPU, stored row-wise, dimension (LDDA,M) The M-by-N matrix, stored transposed as N-by-M matrix embedded in LDDA-by-M array. M is not given; it is implicit. On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. ldda >= n. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k1.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k2.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, INCI > 0. TODO: If INCI is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laswp *******************************************************************************/ // used in zgessm, zgetrf_incpiv. 
extern "C" void magmablas_zlaswp( magma_int_t n, magmaDoubleComplex_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( n > ldda ) info = -3; else if ( k1 < 1 ) info = -4; else if ( k2 < 1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); zlaswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } hipLaunchKernelGGL(( zlaswp_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dAT(k,0), ldda, params ); } #undef dAT } /******************************************************************************/ // Extended version has stride in both directions (ldx, ldy) // to handle both row-wise and column-wise storage. // Matrix A is stored row or column-wise in dA. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void zlaswpx_kernel( int n, magmaDoubleComplex *dA, int ldx, int ldy, zlaswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dA += tid*ldy; magmaDoubleComplex *A1 = dA; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaDoubleComplex *A2 = dA + i2*ldx; magmaDoubleComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldx; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= ZLASWPX performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored either row-wise or column-wise, depending on ldx and ldy. ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dA COMPLEX*16 array on GPU, dimension (*,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldx INTEGER Stride between elements in same column. @param[in] ldy INTEGER Stride between elements in same row. For A stored row-wise, set ldx=ldda and ldy=1. For A stored column-wise, set ldx=1 and ldy=ldda. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_zlaswpx( magma_int_t n, magmaDoubleComplex_ptr dA, magma_int_t ldx, magma_int_t ldy, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_)*ldx + (j_)*ldy) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); zlaswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } hipLaunchKernelGGL(( zlaswpx_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA(k,0), ldx, ldy, params ); } #undef dA } /******************************************************************************/ // This version takes d_ipiv on the GPU. Thus it does not pass pivots // as an argument using a structure, avoiding all the argument size // limitations of CUDA and OpenCL. It also needs just one kernel launch // with all the pivots, instead of multiple kernel launches with small // batches of pivots. On Fermi, it is faster than magmablas_zlaswp // (including copying pivots to the GPU). __global__ void zlaswp2_kernel( int n, magmaDoubleComplex *dAT, int ldda, int npivots, const magma_int_t *d_ipiv, int inci ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; magmaDoubleComplex *A1 = dAT; for( int i1 = 0; i1 < npivots; ++i1 ) { int i2 = d_ipiv[i1*inci] - 1; // Fortran index magmaDoubleComplex *A2 = dAT + i2*ldda; magmaDoubleComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= ZLASWP2 performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Here, d_ipiv is passed in GPU memory. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT COMPLEX*16 array on GPU, stored row-wise, dimension (LDDA,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. (I.e., stride between elements in a column.) @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_zlaswp2( magma_int_t n, magmaDoubleComplex_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, magmaInt_const_ptr d_ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t nb = k2-(k1-1); dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); hipLaunchKernelGGL(( zlaswp2_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dAT(k1-1,0), ldda, nb, d_ipiv, inci ); }
5efcd7bb10eb1a1994e7220afb8b7a857de61f35.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> s d c @author Stan Tomov @author Mathieu Faverge @author Ichitaro Yamazaki @author Mark Gates */ #include "magma_internal.h" // MAX_PIVOTS is maximum number of pivots to apply in each kernel launch // NTHREADS is number of threads in a block // 64 and 256 are better on Kepler; //#define MAX_PIVOTS 64 //#define NTHREADS 256 #define MAX_PIVOTS 32 #define NTHREADS 64 typedef struct { int npivots; int ipiv[MAX_PIVOTS]; } zlaswp_params_t; // Matrix A is stored row-wise in dAT. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void zlaswp_kernel( int n, magmaDoubleComplex *dAT, int ldda, zlaswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; magmaDoubleComplex *A1 = dAT; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaDoubleComplex *A2 = dAT + i2*ldda; magmaDoubleComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= ZLASWP performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT COMPLEX*16 array on GPU, stored row-wise, dimension (LDDA,M) The M-by-N matrix, stored transposed as N-by-M matrix embedded in LDDA-by-M array. M is not given; it is implicit. On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. ldda >= n. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k1.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k2.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, INCI > 0. TODO: If INCI is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laswp *******************************************************************************/ // used in zgessm, zgetrf_incpiv. 
extern "C" void magmablas_zlaswp( magma_int_t n, magmaDoubleComplex_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( n > ldda ) info = -3; else if ( k1 < 1 ) info = -4; else if ( k2 < 1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); zlaswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } zlaswp_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( n, dAT(k,0), ldda, params ); } #undef dAT } /******************************************************************************/ // Extended version has stride in both directions (ldx, ldy) // to handle both row-wise and column-wise storage. // Matrix A is stored row or column-wise in dA. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void zlaswpx_kernel( int n, magmaDoubleComplex *dA, int ldx, int ldy, zlaswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dA += tid*ldy; magmaDoubleComplex *A1 = dA; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaDoubleComplex *A2 = dA + i2*ldx; magmaDoubleComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldx; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= ZLASWPX performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored either row-wise or column-wise, depending on ldx and ldy. ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dA COMPLEX*16 array on GPU, dimension (*,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldx INTEGER Stride between elements in same column. @param[in] ldy INTEGER Stride between elements in same row. For A stored row-wise, set ldx=ldda and ldy=1. For A stored column-wise, set ldx=1 and ldy=ldda. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_zlaswpx( magma_int_t n, magmaDoubleComplex_ptr dA, magma_int_t ldx, magma_int_t ldy, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_)*ldx + (j_)*ldy) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); zlaswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } zlaswpx_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( n, dA(k,0), ldx, ldy, params ); } #undef dA } /******************************************************************************/ // This version takes d_ipiv on the GPU. Thus it does not pass pivots // as an argument using a structure, avoiding all the argument size // limitations of CUDA and OpenCL. It also needs just one kernel launch // with all the pivots, instead of multiple kernel launches with small // batches of pivots. On Fermi, it is faster than magmablas_zlaswp // (including copying pivots to the GPU). __global__ void zlaswp2_kernel( int n, magmaDoubleComplex *dAT, int ldda, int npivots, const magma_int_t *d_ipiv, int inci ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; magmaDoubleComplex *A1 = dAT; for( int i1 = 0; i1 < npivots; ++i1 ) { int i2 = d_ipiv[i1*inci] - 1; // Fortran index magmaDoubleComplex *A2 = dAT + i2*ldda; magmaDoubleComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= ZLASWP2 performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Here, d_ipiv is passed in GPU memory. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT COMPLEX*16 array on GPU, stored row-wise, dimension (LDDA,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. (I.e., stride between elements in a column.) @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_zlaswp2( magma_int_t n, magmaDoubleComplex_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, magmaInt_const_ptr d_ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t nb = k2-(k1-1); dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); zlaswp2_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( n, dAT(k1-1,0), ldda, nb, d_ipiv, inci ); }
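A small self-contained sketch of the row-interchange idea documented above, independent of MAGMA: each thread owns one column of a row-major matrix and applies a 1-based pivot vector sequentially, the same way zlaswp_kernel walks params.ipiv. The kernel name, the tiny matrix, and the use of float instead of magmaDoubleComplex are assumptions made for illustration only.

#include <cuda_runtime.h>
#include <cstdio>

// Each thread owns one column and applies the pivots in order:
// row k is swapped with row ipiv[k]-1 (ipiv is Fortran-style, 1-based).
__global__ void laswp_rowmajor(int ncols, float* A, int lda,
                               int npivots, const int* ipiv)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= ncols) return;
    for (int k = 0; k < npivots; ++k) {
        int other = ipiv[k] - 1;  // convert the 1-based pivot index
        if (other != k) {
            float tmp            = A[k * lda + col];
            A[k * lda + col]     = A[other * lda + col];
            A[other * lda + col] = tmp;
        }
    }
}

int main()
{
    const int m = 4, n = 3;
    float hA[m * n];
    for (int i = 0; i < m * n; ++i) hA[i] = (float)i;  // row i holds i*n .. i*n+n-1
    int hpiv[m] = {3, 2, 3, 4};  // ipiv[0]=3: 1-based rows 1 and 3 (0-based 0 and 2) swap; rest are identity

    float* dA = nullptr;
    int* dpiv = nullptr;
    cudaMalloc(&dA, sizeof(hA));
    cudaMalloc(&dpiv, sizeof(hpiv));
    cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
    cudaMemcpy(dpiv, hpiv, sizeof(hpiv), cudaMemcpyHostToDevice);

    laswp_rowmajor<<<1, 32>>>(n, dA, n, m, dpiv);
    cudaMemcpy(hA, dA, sizeof(hA), cudaMemcpyDeviceToHost);

    for (int i = 0; i < m; ++i)
        printf("row %d: %g %g %g\n", i, hA[i * n + 0], hA[i * n + 1], hA[i * n + 2]);
    // expected: rows 0 and 2 exchanged, rows 1 and 3 unchanged

    cudaFree(dA);
    cudaFree(dpiv);
    return 0;
}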
2ccffeba16bc298173bb58139f1bf551fbf03085.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <rocblas.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>

// code to initialize a vector
void vector_init(float *a, int n)
{
    for(int i = 0 ; i < n ; i++)
    {
        a[i] = (float)(rand() % 100);
    }
}

// verify the result
void verify_result(float *a, float *b, float *c, float factor, int n)
{
    for(int i = 0 ; i < n ; i++)
    {
        assert(c[i] == factor * a[i] + b[i]);
    }
}

int main()
{
    // Vector size
    int n = 1<<16;
    size_t bytes = n * sizeof(float);

    // Declare vector pointers
    float *h_a, *h_b, *h_c;
    float *d_a, *d_b;

    h_a = (float*)malloc(bytes);
    h_b = (float*)malloc(bytes);
    h_c = (float*)malloc(bytes);

    // Allocate the memory
    hipMalloc(&d_a, bytes);
    hipMalloc(&d_b, bytes);

    vector_init(h_a, n);
    vector_init(h_b, n);

    // create and initialize a new context
    hipblasHandle_t handle;
    hipblasCreate(&handle);

    // copy the vectors to the device
    hipblasSetVector(n, sizeof(float), h_a, 1, d_a, 1);
    hipblasSetVector(n, sizeof(float), h_b, 1, d_b, 1);

    // Launch a simple Saxpy kernel
    // Function signature: handle, # elements n, A, increment, B, increment
    const float scale = 2.0f;
    hipblasSaxpy(handle, n, &scale, d_a, 1, d_b, 1);

    // Copy the result vector back out
    hipblasGetVector(n, sizeof(float), d_b, 1, h_c, 1);

    verify_result(h_a, h_b, h_c, scale, n);

    // clean up the created handle
    hipblasDestroy(handle);

    // Release allocated memory
    hipFree(d_a);
    hipFree(d_b);

    free(h_a);
    free(h_b);

    return 0;
}
2ccffeba16bc298173bb58139f1bf551fbf03085.cu
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cublas_v2.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>

// code to initialize a vector
void vector_init(float *a, int n)
{
    for(int i = 0 ; i < n ; i++)
    {
        a[i] = (float)(rand() % 100);
    }
}

// verify the result
void verify_result(float *a, float *b, float *c, float factor, int n)
{
    for(int i = 0 ; i < n ; i++)
    {
        assert(c[i] == factor * a[i] + b[i]);
    }
}

int main()
{
    // Vector size
    int n = 1<<16;
    size_t bytes = n * sizeof(float);

    // Declare vector pointers
    float *h_a, *h_b, *h_c;
    float *d_a, *d_b;

    h_a = (float*)malloc(bytes);
    h_b = (float*)malloc(bytes);
    h_c = (float*)malloc(bytes);

    // Allocate the memory
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);

    vector_init(h_a, n);
    vector_init(h_b, n);

    // create and initialize a new context
    cublasHandle_t handle;
    cublasCreate_v2(&handle);

    // copy the vectors to the device
    cublasSetVector(n, sizeof(float), h_a, 1, d_a, 1);
    cublasSetVector(n, sizeof(float), h_b, 1, d_b, 1);

    // Launch a simple Saxpy kernel
    // Function signature: handle, # elements n, A, increment, B, increment
    const float scale = 2.0f;
    cublasSaxpy(handle, n, &scale, d_a, 1, d_b, 1);

    // Copy the result vector back out
    cublasGetVector(n, sizeof(float), d_b, 1, h_c, 1);

    verify_result(h_a, h_b, h_c, scale, n);

    // clean up the created handle
    cublasDestroy(handle);

    // Release allocated memory
    cudaFree(d_a);
    cudaFree(d_b);

    free(h_a);
    free(h_b);

    return 0;
}
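For reference, the cublasSaxpy/hipblasSaxpy call in the pair above computes d_b[i] = scale * d_a[i] + d_b[i]. The following hand-written kernel (an illustrative sketch, not part of the example above; names and sizes are assumptions) performs the same update, which makes the semantics of the library call explicit.

#include <cuda_runtime.h>
#include <cstdio>

// y <- a*x + y, the BLAS SAXPY update, one element per thread.
__global__ void saxpy_kernel(int n, float a, const float* x, float* y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}

int main()
{
    const int n = 1 << 16;
    const float a = 2.0f;

    float *x, *y;
    cudaMallocManaged(&x, n * sizeof(float));
    cudaMallocManaged(&y, n * sizeof(float));
    for (int i = 0; i < n; ++i) { x[i] = 1.0f; y[i] = 2.0f; }

    saxpy_kernel<<<(n + 255) / 256, 256>>>(n, a, x, y);
    cudaDeviceSynchronize();

    printf("y[0] = %g (expected %g)\n", y[0], 2.0f * 1.0f + 2.0f);

    cudaFree(x);
    cudaFree(y);
    return 0;
}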
7551a15b5ccb5dbb54f9d8e74e2a81ccfd0809d3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "geodesics_ptp.cuh" #include "geodesics_ptp.h" #include <cstdio> #include <fstream> #include <cassert> #include <rocblas.h> #include <thrust/count.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> using namespace std; // geometry processing and shape analysis framework namespace gproshan { double parallel_toplesets_propagation_gpu(const ptp_out_t & ptp_out, che * mesh, const vector<index_t> & sources, const toplesets_t & toplesets) { hipDeviceReset(); float time; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); // BEGIN PTP CHE * h_mesh = new CHE(mesh); CHE * dd_mesh, * d_mesh; cuda_create_CHE(h_mesh, dd_mesh, d_mesh); distance_t * h_dist = ptp_out.dist; distance_t * d_dist[2]; hipMalloc(&d_dist[0], sizeof(distance_t) * h_mesh->n_vertices); hipMalloc(&d_dist[1], sizeof(distance_t) * h_mesh->n_vertices); index_t * d_sorted; hipMalloc(&d_sorted, sizeof(index_t) * h_mesh->n_vertices); distance_t * d_error; hipMalloc(&d_error, sizeof(distance_t) * h_mesh->n_vertices); index_t d; if(ptp_out.clusters) { index_t * h_clusters = ptp_out.clusters; index_t * d_clusters[2] = {nullptr, nullptr}; hipMalloc(&d_clusters[0], sizeof(index_t) * h_mesh->n_vertices); hipMalloc(&d_clusters[1], sizeof(index_t) * h_mesh->n_vertices); d = run_ptp_gpu(d_mesh, h_mesh->n_vertices, h_dist, d_dist, sources, toplesets.limits, toplesets.index, d_sorted, d_error, h_clusters, d_clusters); hipMemcpy(h_clusters, d_clusters[d], sizeof(index_t) * h_mesh->n_vertices, hipMemcpyDeviceToHost); hipFree(d_clusters[0]); hipFree(d_clusters[1]); } else { d = run_ptp_gpu(d_mesh, h_mesh->n_vertices, h_dist, d_dist, sources, toplesets.limits, toplesets.index, d_sorted, d_error); } hipMemcpy(h_dist, d_dist[d], sizeof(distance_t) * h_mesh->n_vertices, hipMemcpyDeviceToHost); hipFree(d_error); hipFree(d_dist[0]); hipFree(d_dist[1]); hipFree(d_sorted); cuda_free_CHE(dd_mesh, d_mesh); // END PTP hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); return time / 1000; } distance_t farthest_point_sampling_ptp_gpu(che * mesh, vector<index_t> & samples, double & time_fps, size_t n, distance_t radio) { hipDeviceReset(); float time; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); // BEGIN FPS PTP CHE * h_mesh = new CHE(mesh); CHE * dd_mesh, * d_mesh; cuda_create_CHE(h_mesh, dd_mesh, d_mesh); distance_t * h_dist = new distance_t[h_mesh->n_vertices]; distance_t * d_dist[2]; hipMalloc(&d_dist[0], sizeof(distance_t) * h_mesh->n_vertices); hipMalloc(&d_dist[1], sizeof(distance_t) * h_mesh->n_vertices); distance_t * d_error; hipMalloc(&d_error, sizeof(distance_t) * h_mesh->n_vertices); index_t * d_sorted; hipMalloc(&d_sorted, sizeof(index_t) * h_mesh->n_vertices); vector<index_t> limits; index_t * toplesets = new index_t[h_mesh->n_vertices]; index_t * sorted_index = new index_t[h_mesh->n_vertices]; hipblasHandle_t handle; hipblasCreate(&handle); if(n >= h_mesh->n_vertices) n = h_mesh->n_vertices >> 1; n -= samples.size(); samples.reserve(n); index_t d; int f; distance_t max_dist = INFINITY; while(n-- && max_dist > radio) { limits.clear(); mesh->compute_toplesets(toplesets, sorted_index, limits, samples); d = run_ptp_gpu(d_mesh, h_mesh->n_vertices, h_dist, d_dist, samples, limits, sorted_index, d_sorted, d_error); // 1 indexing #ifdef SINGLE_P 
hipblasIsamax(handle, mesh->n_vertices(), d_dist[d], 1, &f); #else hipblasIdamax(handle, mesh->n_vertices(), d_dist[d], 1, &f); #endif if(radio > 0 || !n) hipMemcpy(&max_dist, d_dist[d] + f - 1, sizeof(distance_t), hipMemcpyDeviceToHost); samples.push_back(f - 1); } hipblasDestroy(handle); delete [] h_dist; delete [] toplesets; delete [] sorted_index; hipFree(d_error); hipFree(d_dist[0]); hipFree(d_dist[1]); hipFree(d_sorted); cuda_free_CHE(dd_mesh, d_mesh); // END FPS PTP hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); time_fps = time / 1000; hipEventDestroy(start); hipEventDestroy(stop); return max_dist; } index_t run_ptp_gpu(CHE * d_mesh, const index_t & n_vertices, distance_t * h_dist, distance_t ** d_dist, const vector<index_t> & sources, const vector<index_t> & limits, const index_t * h_sorted, index_t * d_sorted, distance_t * d_error, index_t * h_clusters, index_t ** d_clusters) { #pragma omp parallel for for(index_t v = 0; v < n_vertices; v++) h_dist[v] = INFINITY; for(index_t i = 0; i < sources.size(); i++) h_dist[sources[i]] = 0; hipMemcpy(d_dist[0], h_dist, sizeof(distance_t) * n_vertices, hipMemcpyHostToDevice); hipMemcpy(d_dist[1], h_dist, sizeof(distance_t) * n_vertices, hipMemcpyHostToDevice); hipMemcpy(d_sorted, h_sorted, sizeof(index_t) * n_vertices, hipMemcpyHostToDevice); if(h_clusters) { assert(d_clusters); for(index_t i = 0; i < sources.size(); i++) h_clusters[sources[i]] = i + 1; hipMemcpy(d_clusters[0], h_clusters, sizeof(index_t) * n_vertices, hipMemcpyHostToDevice); hipMemcpy(d_clusters[1], h_clusters, sizeof(index_t) * n_vertices, hipMemcpyHostToDevice); } index_t d = 0; index_t start, end, n_cond; index_t i = 1, j = 2; // maximum number of iterations index_t iter = 0; index_t max_iter = limits.size() << 1; while(i < j && iter++ < max_iter) { if(i < (j >> 1)) i = (j >> 1); // K/2 limit band size start = limits[i]; end = limits[j]; n_cond = limits[i + 1] - start; if(h_clusters) hipLaunchKernelGGL(( relax_ptp) , dim3(NB(end - start)), dim3(NT) , 0, 0, d_mesh, d_dist[!d], d_dist[d], d_clusters[!d], d_clusters[d], d_sorted, end, start); else hipLaunchKernelGGL(( relax_ptp) , dim3(NB(end - start)), dim3(NT) , 0, 0, d_mesh, d_dist[!d], d_dist[d], d_sorted, end, start); hipDeviceSynchronize(); hipLaunchKernelGGL(( relative_error) , dim3(NB(n_cond)), dim3(NT) , 0, 0, d_error, d_dist[!d], d_dist[d], start, start + n_cond, d_sorted); hipDeviceSynchronize(); if(n_cond == thrust::count_if(thrust::device, d_error + start, d_error + start + n_cond, is_ok())) i++; if(j < limits.size() - 1) j++; d = !d; } return d; } __global__ void relax_ptp(CHE * mesh, distance_t * new_dist, distance_t * old_dist, index_t * sorted, index_t end, index_t start) { index_t v = blockDim.x * blockIdx.x + threadIdx.x + start; if(v < end) { v = sorted[v]; if(v < mesh->n_vertices) { new_dist[v] = old_dist[v]; distance_t d; cu_for_star(he, mesh, v) { d = cu_update_step(mesh, old_dist, he); if(d < new_dist[v]) new_dist[v] = d; } } } } __global__ void relax_ptp(CHE * mesh, distance_t * new_dist, distance_t * old_dist, index_t * new_clusters, index_t * old_clusters, index_t * sorted, index_t end, index_t start) { index_t v = blockDim.x * blockIdx.x + threadIdx.x + start; if(v < end) { v = sorted[v]; if(v < mesh->n_vertices) { new_dist[v] = old_dist[v]; new_clusters[v] = old_clusters[v]; distance_t d; cu_for_star(he, mesh, v) { d = cu_update_step(mesh, old_dist, he); if(d < new_dist[v]) { new_dist[v] = d; new_clusters[v] = old_dist[mesh->VT[cu_prev(he)]] < 
old_dist[mesh->VT[cu_next(he)]] ? old_clusters[mesh->VT[cu_prev(he)]] : old_clusters[mesh->VT[cu_next(he)]]; } } } } } __global__ void relative_error(distance_t * error, const distance_t * new_dist, const distance_t * old_dist, const index_t start, const index_t end, const index_t * sorted) { index_t i = blockDim.x * blockIdx.x + threadIdx.x + start; if(i < end) { index_t v = sorted ? sorted[i] : i; #ifdef SINGLE_P error[i] = fabsf(new_dist[v] - old_dist[v]) / old_dist[v]; #else error[i] = fabs(new_dist[v] - old_dist[v]) / old_dist[v]; #endif } } __forceinline__ __device__ distance_t cu_update_step(CHE * mesh, const distance_t * dist, const index_t & he) { index_t x[3]; x[0] = mesh->VT[cu_next(he)]; x[1] = mesh->VT[cu_prev(he)]; x[2] = mesh->VT[he]; vertex_cu X[2]; X[0] = mesh->GT[x[0]] - mesh->GT[x[2]]; X[1] = mesh->GT[x[1]] - mesh->GT[x[2]]; distance_t t[2]; t[0] = dist[x[0]]; t[1] = dist[x[1]]; distance_t q[2][2]; q[0][0] = (X[0], X[0]); q[0][1] = (X[0], X[1]); q[1][0] = (X[1], X[0]); q[1][1] = (X[1], X[1]); distance_t det = q[0][0] * q[1][1] - q[0][1] * q[1][0]; distance_t Q[2][2]; Q[0][0] = q[1][1] / det; Q[0][1] = -q[0][1] / det; Q[1][0] = -q[1][0] / det; Q[1][1] = q[0][0] / det; distance_t delta = t[0] * (Q[0][0] + Q[1][0]) + t[1] * (Q[0][1] + Q[1][1]); distance_t dis = delta * delta - (Q[0][0] + Q[0][1] + Q[1][0] + Q[1][1]) * (t[0] * t[0] * Q[0][0] + t[0] * t[1] * (Q[1][0] + Q[0][1]) + t[1] * t[1] * Q[1][1] - 1); #ifdef SINGLE_P distance_t p = delta + sqrtf(dis); #else distance_t p = delta + sqrt(dis); #endif p /= Q[0][0] + Q[0][1] + Q[1][0] + Q[1][1]; distance_t tp[2]; tp[0] = t[0] - p; tp[1] = t[1] - p; vertex_cu n(tp[0] * (X[0][0]*Q[0][0] + X[1][0]*Q[1][0]) + tp[1] * (X[0][0]*Q[0][1] + X[1][0]*Q[1][1]), tp[0] * (X[0][1]*Q[0][0] + X[1][1]*Q[1][0]) + tp[1] * (X[0][1]*Q[0][1] + X[1][1]*Q[1][1]), tp[0] * (X[0][2]*Q[0][0] + X[1][2]*Q[1][0]) + tp[1] * (X[0][2]*Q[0][1] + X[1][2]*Q[1][1]) ); distance_t cond[2]; cond[0] = (X[0] , n); cond[1] = (X[1] , n); distance_t c[2]; c[0] = cond[0] * Q[0][0] + cond[1] * Q[0][1]; c[1] = cond[0] * Q[1][0] + cond[1] * Q[1][1]; if(t[0] == INFINITY || t[1] == INFINITY || dis < 0 || c[0] >= 0 || c[1] >= 0) { distance_t dp[2]; dp[0] = dist[x[0]] + *X[0]; dp[1] = dist[x[1]] + *X[1]; p = dp[dp[1] < dp[0]]; } return p; } __host__ __device__ bool is_ok::operator()(const distance_t & val) const { return val < PTP_TOL; } } // namespace gproshan
7551a15b5ccb5dbb54f9d8e74e2a81ccfd0809d3.cu
#include "geodesics_ptp.cuh" #include "geodesics_ptp.h" #include <cstdio> #include <fstream> #include <cassert> #include <cublas_v2.h> #include <thrust/count.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> using namespace std; // geometry processing and shape analysis framework namespace gproshan { double parallel_toplesets_propagation_gpu(const ptp_out_t & ptp_out, che * mesh, const vector<index_t> & sources, const toplesets_t & toplesets) { cudaDeviceReset(); float time; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // BEGIN PTP CHE * h_mesh = new CHE(mesh); CHE * dd_mesh, * d_mesh; cuda_create_CHE(h_mesh, dd_mesh, d_mesh); distance_t * h_dist = ptp_out.dist; distance_t * d_dist[2]; cudaMalloc(&d_dist[0], sizeof(distance_t) * h_mesh->n_vertices); cudaMalloc(&d_dist[1], sizeof(distance_t) * h_mesh->n_vertices); index_t * d_sorted; cudaMalloc(&d_sorted, sizeof(index_t) * h_mesh->n_vertices); distance_t * d_error; cudaMalloc(&d_error, sizeof(distance_t) * h_mesh->n_vertices); index_t d; if(ptp_out.clusters) { index_t * h_clusters = ptp_out.clusters; index_t * d_clusters[2] = {nullptr, nullptr}; cudaMalloc(&d_clusters[0], sizeof(index_t) * h_mesh->n_vertices); cudaMalloc(&d_clusters[1], sizeof(index_t) * h_mesh->n_vertices); d = run_ptp_gpu(d_mesh, h_mesh->n_vertices, h_dist, d_dist, sources, toplesets.limits, toplesets.index, d_sorted, d_error, h_clusters, d_clusters); cudaMemcpy(h_clusters, d_clusters[d], sizeof(index_t) * h_mesh->n_vertices, cudaMemcpyDeviceToHost); cudaFree(d_clusters[0]); cudaFree(d_clusters[1]); } else { d = run_ptp_gpu(d_mesh, h_mesh->n_vertices, h_dist, d_dist, sources, toplesets.limits, toplesets.index, d_sorted, d_error); } cudaMemcpy(h_dist, d_dist[d], sizeof(distance_t) * h_mesh->n_vertices, cudaMemcpyDeviceToHost); cudaFree(d_error); cudaFree(d_dist[0]); cudaFree(d_dist[1]); cudaFree(d_sorted); cuda_free_CHE(dd_mesh, d_mesh); // END PTP cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); return time / 1000; } distance_t farthest_point_sampling_ptp_gpu(che * mesh, vector<index_t> & samples, double & time_fps, size_t n, distance_t radio) { cudaDeviceReset(); float time; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // BEGIN FPS PTP CHE * h_mesh = new CHE(mesh); CHE * dd_mesh, * d_mesh; cuda_create_CHE(h_mesh, dd_mesh, d_mesh); distance_t * h_dist = new distance_t[h_mesh->n_vertices]; distance_t * d_dist[2]; cudaMalloc(&d_dist[0], sizeof(distance_t) * h_mesh->n_vertices); cudaMalloc(&d_dist[1], sizeof(distance_t) * h_mesh->n_vertices); distance_t * d_error; cudaMalloc(&d_error, sizeof(distance_t) * h_mesh->n_vertices); index_t * d_sorted; cudaMalloc(&d_sorted, sizeof(index_t) * h_mesh->n_vertices); vector<index_t> limits; index_t * toplesets = new index_t[h_mesh->n_vertices]; index_t * sorted_index = new index_t[h_mesh->n_vertices]; cublasHandle_t handle; cublasCreate(&handle); if(n >= h_mesh->n_vertices) n = h_mesh->n_vertices >> 1; n -= samples.size(); samples.reserve(n); index_t d; int f; distance_t max_dist = INFINITY; while(n-- && max_dist > radio) { limits.clear(); mesh->compute_toplesets(toplesets, sorted_index, limits, samples); d = run_ptp_gpu(d_mesh, h_mesh->n_vertices, h_dist, d_dist, samples, limits, sorted_index, d_sorted, d_error); // 1 indexing #ifdef SINGLE_P cublasIsamax(handle, mesh->n_vertices(), d_dist[d], 1, &f); #else 
cublasIdamax(handle, mesh->n_vertices(), d_dist[d], 1, &f); #endif if(radio > 0 || !n) cudaMemcpy(&max_dist, d_dist[d] + f - 1, sizeof(distance_t), cudaMemcpyDeviceToHost); samples.push_back(f - 1); } cublasDestroy(handle); delete [] h_dist; delete [] toplesets; delete [] sorted_index; cudaFree(d_error); cudaFree(d_dist[0]); cudaFree(d_dist[1]); cudaFree(d_sorted); cuda_free_CHE(dd_mesh, d_mesh); // END FPS PTP cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); time_fps = time / 1000; cudaEventDestroy(start); cudaEventDestroy(stop); return max_dist; } index_t run_ptp_gpu(CHE * d_mesh, const index_t & n_vertices, distance_t * h_dist, distance_t ** d_dist, const vector<index_t> & sources, const vector<index_t> & limits, const index_t * h_sorted, index_t * d_sorted, distance_t * d_error, index_t * h_clusters, index_t ** d_clusters) { #pragma omp parallel for for(index_t v = 0; v < n_vertices; v++) h_dist[v] = INFINITY; for(index_t i = 0; i < sources.size(); i++) h_dist[sources[i]] = 0; cudaMemcpy(d_dist[0], h_dist, sizeof(distance_t) * n_vertices, cudaMemcpyHostToDevice); cudaMemcpy(d_dist[1], h_dist, sizeof(distance_t) * n_vertices, cudaMemcpyHostToDevice); cudaMemcpy(d_sorted, h_sorted, sizeof(index_t) * n_vertices, cudaMemcpyHostToDevice); if(h_clusters) { assert(d_clusters); for(index_t i = 0; i < sources.size(); i++) h_clusters[sources[i]] = i + 1; cudaMemcpy(d_clusters[0], h_clusters, sizeof(index_t) * n_vertices, cudaMemcpyHostToDevice); cudaMemcpy(d_clusters[1], h_clusters, sizeof(index_t) * n_vertices, cudaMemcpyHostToDevice); } index_t d = 0; index_t start, end, n_cond; index_t i = 1, j = 2; // maximum number of iterations index_t iter = 0; index_t max_iter = limits.size() << 1; while(i < j && iter++ < max_iter) { if(i < (j >> 1)) i = (j >> 1); // K/2 limit band size start = limits[i]; end = limits[j]; n_cond = limits[i + 1] - start; if(h_clusters) relax_ptp <<< NB(end - start), NT >>> (d_mesh, d_dist[!d], d_dist[d], d_clusters[!d], d_clusters[d], d_sorted, end, start); else relax_ptp <<< NB(end - start), NT >>> (d_mesh, d_dist[!d], d_dist[d], d_sorted, end, start); cudaDeviceSynchronize(); relative_error <<< NB(n_cond), NT >>> (d_error, d_dist[!d], d_dist[d], start, start + n_cond, d_sorted); cudaDeviceSynchronize(); if(n_cond == thrust::count_if(thrust::device, d_error + start, d_error + start + n_cond, is_ok())) i++; if(j < limits.size() - 1) j++; d = !d; } return d; } __global__ void relax_ptp(CHE * mesh, distance_t * new_dist, distance_t * old_dist, index_t * sorted, index_t end, index_t start) { index_t v = blockDim.x * blockIdx.x + threadIdx.x + start; if(v < end) { v = sorted[v]; if(v < mesh->n_vertices) { new_dist[v] = old_dist[v]; distance_t d; cu_for_star(he, mesh, v) { d = cu_update_step(mesh, old_dist, he); if(d < new_dist[v]) new_dist[v] = d; } } } } __global__ void relax_ptp(CHE * mesh, distance_t * new_dist, distance_t * old_dist, index_t * new_clusters, index_t * old_clusters, index_t * sorted, index_t end, index_t start) { index_t v = blockDim.x * blockIdx.x + threadIdx.x + start; if(v < end) { v = sorted[v]; if(v < mesh->n_vertices) { new_dist[v] = old_dist[v]; new_clusters[v] = old_clusters[v]; distance_t d; cu_for_star(he, mesh, v) { d = cu_update_step(mesh, old_dist, he); if(d < new_dist[v]) { new_dist[v] = d; new_clusters[v] = old_dist[mesh->VT[cu_prev(he)]] < old_dist[mesh->VT[cu_next(he)]] ? 
old_clusters[mesh->VT[cu_prev(he)]] : old_clusters[mesh->VT[cu_next(he)]]; } } } } } __global__ void relative_error(distance_t * error, const distance_t * new_dist, const distance_t * old_dist, const index_t start, const index_t end, const index_t * sorted) { index_t i = blockDim.x * blockIdx.x + threadIdx.x + start; if(i < end) { index_t v = sorted ? sorted[i] : i; #ifdef SINGLE_P error[i] = fabsf(new_dist[v] - old_dist[v]) / old_dist[v]; #else error[i] = fabs(new_dist[v] - old_dist[v]) / old_dist[v]; #endif } } __forceinline__ __device__ distance_t cu_update_step(CHE * mesh, const distance_t * dist, const index_t & he) { index_t x[3]; x[0] = mesh->VT[cu_next(he)]; x[1] = mesh->VT[cu_prev(he)]; x[2] = mesh->VT[he]; vertex_cu X[2]; X[0] = mesh->GT[x[0]] - mesh->GT[x[2]]; X[1] = mesh->GT[x[1]] - mesh->GT[x[2]]; distance_t t[2]; t[0] = dist[x[0]]; t[1] = dist[x[1]]; distance_t q[2][2]; q[0][0] = (X[0], X[0]); q[0][1] = (X[0], X[1]); q[1][0] = (X[1], X[0]); q[1][1] = (X[1], X[1]); distance_t det = q[0][0] * q[1][1] - q[0][1] * q[1][0]; distance_t Q[2][2]; Q[0][0] = q[1][1] / det; Q[0][1] = -q[0][1] / det; Q[1][0] = -q[1][0] / det; Q[1][1] = q[0][0] / det; distance_t delta = t[0] * (Q[0][0] + Q[1][0]) + t[1] * (Q[0][1] + Q[1][1]); distance_t dis = delta * delta - (Q[0][0] + Q[0][1] + Q[1][0] + Q[1][1]) * (t[0] * t[0] * Q[0][0] + t[0] * t[1] * (Q[1][0] + Q[0][1]) + t[1] * t[1] * Q[1][1] - 1); #ifdef SINGLE_P distance_t p = delta + sqrtf(dis); #else distance_t p = delta + sqrt(dis); #endif p /= Q[0][0] + Q[0][1] + Q[1][0] + Q[1][1]; distance_t tp[2]; tp[0] = t[0] - p; tp[1] = t[1] - p; vertex_cu n(tp[0] * (X[0][0]*Q[0][0] + X[1][0]*Q[1][0]) + tp[1] * (X[0][0]*Q[0][1] + X[1][0]*Q[1][1]), tp[0] * (X[0][1]*Q[0][0] + X[1][1]*Q[1][0]) + tp[1] * (X[0][1]*Q[0][1] + X[1][1]*Q[1][1]), tp[0] * (X[0][2]*Q[0][0] + X[1][2]*Q[1][0]) + tp[1] * (X[0][2]*Q[0][1] + X[1][2]*Q[1][1]) ); distance_t cond[2]; cond[0] = (X[0] , n); cond[1] = (X[1] , n); distance_t c[2]; c[0] = cond[0] * Q[0][0] + cond[1] * Q[0][1]; c[1] = cond[0] * Q[1][0] + cond[1] * Q[1][1]; if(t[0] == INFINITY || t[1] == INFINITY || dis < 0 || c[0] >= 0 || c[1] >= 0) { distance_t dp[2]; dp[0] = dist[x[0]] + *X[0]; dp[1] = dist[x[1]] + *X[1]; p = dp[dp[1] < dp[0]]; } return p; } __host__ __device__ bool is_ok::operator()(const distance_t & val) const { return val < PTP_TOL; } } // namespace gproshan
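One detail worth isolating from the farthest-point-sampling loop above is that cublasIsamax/cublasIdamax return a Fortran-style 1-based index, which is why the code reads d_dist[d] + f - 1 and pushes f - 1. A minimal standalone check of that behaviour follows; the array contents and variable names are illustrative assumptions only.

#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    const int n = 5;
    const float h_x[n] = {0.5f, 3.0f, -7.0f, 2.0f, 1.0f};  // |x[2]| has the largest magnitude

    float* d_x = nullptr;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMemcpy(d_x, h_x, n * sizeof(float), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);

    int imax = 0;
    cublasIsamax(handle, n, d_x, 1, &imax);  // index of max |x[i]|, returned 1-based

    printf("cublasIsamax returned %d -> C index %d, value %g\n",
           imax, imax - 1, h_x[imax - 1]);   // prints 3 -> 2, value -7

    cublasDestroy(handle);
    cudaFree(d_x);
    return 0;
}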
abf565453a748a6045b20944136e475f073d7db3.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <chrono> #include <algorithm> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" #define BBCU_BATCHNORM_FW_BLOCK_SIZE 128 #define BBCU_BATCHNORM_BW_BLOCK_SIZE 128 ////////////////////////////// // common ////////////////////////////// __device__ __forceinline__ float device_fp32_LocalSum(float v, float *buf) { buf[threadIdx.x] = v; __syncthreads(); // int comb = 1; while (comb < blockDim.x) { int next = comb * 2; int mask = next - 1; if ((threadIdx.x & mask) == 0) { buf[threadIdx.x] += buf[threadIdx.x + comb]; } comb = next; __syncthreads(); } float sum = buf[0]; __syncthreads(); return sum; } ////////////////////////////// // forward training ////////////////////////////// __global__ void kernal_fp32_BatchNormalization_ForwardTraining( const float *x_buf, float *y_buf, float const *gamma_buf, float const *beta_buf, float *mean_buf, float *rstd_buf, float *running_mean_buf, float *running_var_buf, float momentum, float reciprocal_frame_size, int frame_size, int frame_stride ) { __shared__ float buf[BBCU_BATCHNORM_FW_BLOCK_SIZE]; // int const node = blockIdx.x; int const id = threadIdx.x; int const id_step = blockDim.x; // (Kahan summation algorithm) float s1 = 0, c1 = 0, y1, t1; float s2 = 0, c2 = 0, y2, t2; const float* x_ptr = &x_buf[frame_stride * node]; for ( int frame = id; frame < frame_size; frame += id_step) { float x = x_ptr[frame]; y1 = x - c1; t1 = s1 + y1; c1 = (t1 - s1) - y1; s1 = t1; y2 = (x * x) - c2; t2 = s2 + y2; c2 = (t2 - s2) - y2; s2 = t2; } // s1 = device_fp32_LocalSum(s1, buf); s2 = device_fp32_LocalSum(s2, buf); float mean = s1 * reciprocal_frame_size; float var = max(1.0e-7f, (s2 * reciprocal_frame_size) - (mean * mean)); float rstd = rsqrt(var); if (id == 0) { running_mean_buf[node] = (running_mean_buf[node] * momentum) + (mean * (1.0 - momentum)); running_var_buf[node] = (running_var_buf[node] * momentum) + (var * (1.0 - momentum)); mean_buf[node] = mean; rstd_buf[node] = rstd; } // float gamma = gamma_buf[node]; float beta = beta_buf[node]; float* y_ptr = &y_buf[frame_stride * node]; for ( int frame = id; frame < frame_size; frame += id_step) { float x = x_ptr[frame]; x = (x - mean) * rstd; x = x * gamma + beta; y_ptr[frame] = x; } } BBCU_DLL_EXPORT int bbcu_fp32_BatchNormalization_ForwardTraining ( float const *dev_x_buf, float *dev_y_buf, float const *dev_gamma_buf, float const *dev_beta_buf, float *dev_mean_buf, float *dev_rstd_buf, float *dev_running_mean_buf, float *dev_running_var_buf, float momentum, int node_size, int frame_size, int frame_stride, hipStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); dim3 grid(node_size); dim3 block(BBCU_BATCHNORM_FW_BLOCK_SIZE); hipLaunchKernelGGL(( kernal_fp32_BatchNormalization_ForwardTraining), dim3(grid), dim3(block), 0, streamId, dev_x_buf, dev_y_buf, dev_gamma_buf, dev_beta_buf, dev_mean_buf, dev_rstd_buf, dev_running_mean_buf, dev_running_var_buf, momentum, 1.0f/ frame_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } ////////////////////////////// // ReForward ////////////////////////////// __global__ void kernal_fp32_BatchNormalization_ReForward( const float *x_buf, float *y_buf, float const *gamma_buf, float const *beta_buf, float const *mean_buf, float const *rstd_buf, int frame_size, int frame_stride ) { // int const node = blockIdx.x; int const id = threadIdx.x; int const id_step = blockDim.x; float mean = 
mean_buf[node]; float rstd = rstd_buf[node]; float gamma = gamma_buf[node]; float beta = beta_buf[node]; float const *x_ptr = &x_buf[frame_stride * node]; float *y_ptr = &y_buf[frame_stride * node]; for ( int frame = id; frame < frame_size; frame += id_step) { float x = x_ptr[frame]; x = (x - mean) * rstd; x = x * gamma + beta; y_ptr[frame] = x; } } BBCU_DLL_EXPORT int bbcu_fp32_BatchNormalization_ReForward ( float const *dev_x_buf, float *dev_y_buf, float const *dev_gamma_buf, float const *dev_beta_buf, float const *dev_mean_buf, float const *dev_rstd_buf, int node_size, int frame_size, int frame_stride, hipStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); dim3 grid(node_size); dim3 block(BBCU_BATCHNORM_FW_BLOCK_SIZE); hipLaunchKernelGGL(( kernal_fp32_BatchNormalization_ReForward), dim3(grid), dim3(block), 0, streamId, dev_x_buf, dev_y_buf, dev_gamma_buf, dev_beta_buf, dev_mean_buf, dev_rstd_buf, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } ////////////////////////////// // Forward Inference ////////////////////////////// __global__ void kernal_fp32_BatchNormalization_ForwardInference( const float *x_buf, float *y_buf, float const *gamma_buf, float const *beta_buf, float const *running_mean_buf, float const *running_var_buf, int node_size, int frame_size, int frame_stride ) { int node = blockDim.y * blockIdx.y + threadIdx.y; int id = threadIdx.x; int id_step = blockDim.x; if ( node >= node_size) { return; } float gamma = gamma_buf[node]; float beta = beta_buf[node]; float mean = running_mean_buf[node]; float var = running_var_buf[node]; float rstd = 1.0 / (sqrt(var) + 1.0e-7); float const *x_ptr = &x_buf[frame_stride * node]; float *y_ptr = &y_buf[frame_stride * node]; for ( int frame = id; frame < frame_size; frame += id_step ) { float x = x_ptr[frame]; y_ptr[frame] = ((x - mean) * rstd) * gamma + beta; } } BBCU_DLL_EXPORT int bbcu_fp32_BatchNormalization_ForwardInference ( float const *dev_x_buf, float *dev_y_buf, float const *dev_gamma_buf, float const *dev_beta_buf, float const *dev_running_mean_buf, float const *dev_running_var_buf, int node_size, int frame_size, int frame_stride, hipStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); unsigned int const THREAD_SIZE = 1024; unsigned int const MAX_FRAME_UNIT = 1024; unsigned int const MAX_NODE_UNIT = 1024; #if 1 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= node_size ) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; } #endif block.x = ::min(block.x, MAX_FRAME_UNIT); block.y = ::min(block.y, MAX_NODE_UNIT); dim3 grid(1, (node_size + (block.y - 1)) / block.y); hipLaunchKernelGGL(( kernal_fp32_BatchNormalization_ForwardInference), dim3(grid), dim3(block), 0, streamId, dev_x_buf, dev_y_buf, dev_gamma_buf, dev_beta_buf, dev_running_mean_buf, dev_running_var_buf, node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } ////////////////////////////// // Backward ////////////////////////////// __global__ void kernal_fp32_BatchNormalization_Backward ( float const *x_buf, float const *dy_buf, float *dx_buf, float const *gamma_buf, float *dgamma_buf, float *dbeta_buf, float const *mean_buf, float const *rstd_buf, float reciprocal_frame_size, int frame_size, int frame_stride ) { __shared__ float 
buf[BBCU_BATCHNORM_BW_BLOCK_SIZE]; // int const node = blockIdx.x; int const id = threadIdx.x; int const id_step = blockDim.x; float mean = mean_buf[node]; float rstd = rstd_buf[node]; float gamma = gamma_buf[node]; float dgamma = 0; float dbeta = 0; float dmeanx = 0; float dstd = 0; float dgamma_prev; float dbeta_prev; if (id == 0) { dgamma_prev = dgamma_buf[node]; dbeta_prev = dbeta_buf[node]; } float rstd2 = rstd * rstd; float const * const x_ptr = &x_buf[node * frame_stride]; float const * const dy_ptr = &dy_buf[node * frame_stride]; for ( int frame = id; frame < frame_size; frame += id_step) { float x = x_ptr[frame]; float dy = dy_ptr[frame]; float xc = x - mean; float xn = xc * rstd; dbeta += dy; dgamma += xn * dy; float dxn = gamma * dy; dstd += -(dxn * xc * rstd2); dmeanx += -(dxn * rstd); } dbeta = device_fp32_LocalSum(dbeta, buf); dgamma = device_fp32_LocalSum(dgamma, buf); if (id == 0) { dgamma_buf[node] = dgamma + dgamma_prev; dbeta_buf[node] = dbeta + dbeta_prev; } dstd = device_fp32_LocalSum(dstd, buf); dmeanx = device_fp32_LocalSum(dmeanx, buf); float * const dx_ptr = &dx_buf[node * frame_stride]; float dvar = dstd * rstd; float dmean = (dmeanx - (mean * dvar)) * reciprocal_frame_size; for ( int frame = id; frame < frame_size; frame += id_step) { float dy = dy_ptr[frame]; float x = x_ptr[frame]; float dxn = dy * gamma; float dxc = dxn * rstd; float dx = dxc + dmean + (x * dvar * reciprocal_frame_size); dx_ptr[frame] = dx; } } BBCU_DLL_EXPORT int bbcu_fp32_BatchNormalization_Backward ( const float *dev_x_buf, const float *dev_dy_buf, float *dev_dx_buf, float const *dev_gamma_buf, float *dev_dgamma_buf, float *dev_dbeta_buf, float const *dev_mean_buf, float const *dev_rstd_buf, float reciprocal_frame_size, int node_size, int frame_size, int frame_stride, hipStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); dim3 grid(node_size); dim3 block(BBCU_BATCHNORM_BW_BLOCK_SIZE); kernal_fp32_BatchNormalization_Backward << <grid, block, 0, streamId >> > ( dev_x_buf, dev_dy_buf, dev_dx_buf, dev_gamma_buf, dev_dgamma_buf, dev_dbeta_buf, dev_mean_buf, dev_rstd_buf, reciprocal_frame_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; }
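// The launchers in this file show the two spellings that hipify leaves behind
// for the same CUDA launch: the first three kernels were rewritten to
// hipLaunchKernelGGL(kernel, grid, block, sharedBytes, stream, args...), while
// bbcu_fp32_BatchNormalization_Backward still uses the original triple-chevron
// form, which the HIP compiler also accepts. The two calls below are therefore
// equivalent (sketch only, kernel arguments elided):
//
//   hipLaunchKernelGGL((kernal_fp32_BatchNormalization_Backward),
//                      dim3(grid), dim3(block), 0, streamId, /* args */);
//   kernal_fp32_BatchNormalization_Backward<<<grid, block, 0, streamId>>>(/* args */);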
abf565453a748a6045b20944136e475f073d7db3.cu
#include <iostream> #include <chrono> #include <algorithm> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" #define BBCU_BATCHNORM_FW_BLOCK_SIZE 128 #define BBCU_BATCHNORM_BW_BLOCK_SIZE 128 ////////////////////////////// // common ////////////////////////////// __device__ __forceinline__ float device_fp32_LocalSum(float v, float *buf) { buf[threadIdx.x] = v; __syncthreads(); // スレッド間集計 int comb = 1; while (comb < blockDim.x) { int next = comb * 2; int mask = next - 1; if ((threadIdx.x & mask) == 0) { buf[threadIdx.x] += buf[threadIdx.x + comb]; } comb = next; __syncthreads(); } float sum = buf[0]; __syncthreads(); return sum; } ////////////////////////////// // forward training ////////////////////////////// __global__ void kernal_fp32_BatchNormalization_ForwardTraining( const float *x_buf, float *y_buf, float const *gamma_buf, float const *beta_buf, float *mean_buf, float *rstd_buf, float *running_mean_buf, float *running_var_buf, float momentum, float reciprocal_frame_size, int frame_size, int frame_stride ) { __shared__ float buf[BBCU_BATCHNORM_FW_BLOCK_SIZE]; // 初期化 int const node = blockIdx.x; int const id = threadIdx.x; int const id_step = blockDim.x; // カハンの加算アルゴリズム(Kahan summation algorithm) float s1 = 0, c1 = 0, y1, t1; float s2 = 0, c2 = 0, y2, t2; const float* x_ptr = &x_buf[frame_stride * node]; for ( int frame = id; frame < frame_size; frame += id_step) { float x = x_ptr[frame]; y1 = x - c1; t1 = s1 + y1; c1 = (t1 - s1) - y1; s1 = t1; y2 = (x * x) - c2; t2 = s2 + y2; c2 = (t2 - s2) - y2; s2 = t2; } // 集計 s1 = device_fp32_LocalSum(s1, buf); s2 = device_fp32_LocalSum(s2, buf); float mean = s1 * reciprocal_frame_size; float var = max(1.0e-7f, (s2 * reciprocal_frame_size) - (mean * mean)); float rstd = rsqrt(var); if (id == 0) { running_mean_buf[node] = (running_mean_buf[node] * momentum) + (mean * (1.0 - momentum)); running_var_buf[node] = (running_var_buf[node] * momentum) + (var * (1.0 - momentum)); mean_buf[node] = mean; rstd_buf[node] = rstd; } // 正規化 float gamma = gamma_buf[node]; float beta = beta_buf[node]; float* y_ptr = &y_buf[frame_stride * node]; for ( int frame = id; frame < frame_size; frame += id_step) { float x = x_ptr[frame]; x = (x - mean) * rstd; x = x * gamma + beta; y_ptr[frame] = x; } } BBCU_DLL_EXPORT int bbcu_fp32_BatchNormalization_ForwardTraining ( float const *dev_x_buf, float *dev_y_buf, float const *dev_gamma_buf, float const *dev_beta_buf, float *dev_mean_buf, float *dev_rstd_buf, float *dev_running_mean_buf, float *dev_running_var_buf, float momentum, int node_size, int frame_size, int frame_stride, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); dim3 grid(node_size); dim3 block(BBCU_BATCHNORM_FW_BLOCK_SIZE); kernal_fp32_BatchNormalization_ForwardTraining<<<grid, block, 0, streamId>>> ( dev_x_buf, dev_y_buf, dev_gamma_buf, dev_beta_buf, dev_mean_buf, dev_rstd_buf, dev_running_mean_buf, dev_running_var_buf, momentum, 1.0f/ frame_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } ////////////////////////////// // ReForward ////////////////////////////// __global__ void kernal_fp32_BatchNormalization_ReForward( const float *x_buf, float *y_buf, float const *gamma_buf, float const *beta_buf, float const *mean_buf, float const *rstd_buf, int frame_size, int frame_stride ) { // 初期化 int const node = blockIdx.x; int const id = threadIdx.x; int const id_step = blockDim.x; float mean = mean_buf[node]; float rstd = rstd_buf[node]; float gamma = 
gamma_buf[node]; float beta = beta_buf[node]; float const *x_ptr = &x_buf[frame_stride * node]; float *y_ptr = &y_buf[frame_stride * node]; for ( int frame = id; frame < frame_size; frame += id_step) { float x = x_ptr[frame]; x = (x - mean) * rstd; x = x * gamma + beta; y_ptr[frame] = x; } } BBCU_DLL_EXPORT int bbcu_fp32_BatchNormalization_ReForward ( float const *dev_x_buf, float *dev_y_buf, float const *dev_gamma_buf, float const *dev_beta_buf, float const *dev_mean_buf, float const *dev_rstd_buf, int node_size, int frame_size, int frame_stride, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); dim3 grid(node_size); dim3 block(BBCU_BATCHNORM_FW_BLOCK_SIZE); kernal_fp32_BatchNormalization_ReForward<<<grid, block, 0, streamId>>> ( dev_x_buf, dev_y_buf, dev_gamma_buf, dev_beta_buf, dev_mean_buf, dev_rstd_buf, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } ////////////////////////////// // Forward Inference ////////////////////////////// __global__ void kernal_fp32_BatchNormalization_ForwardInference( const float *x_buf, float *y_buf, float const *gamma_buf, float const *beta_buf, float const *running_mean_buf, float const *running_var_buf, int node_size, int frame_size, int frame_stride ) { int node = blockDim.y * blockIdx.y + threadIdx.y; int id = threadIdx.x; int id_step = blockDim.x; if ( node >= node_size) { return; } float gamma = gamma_buf[node]; float beta = beta_buf[node]; float mean = running_mean_buf[node]; float var = running_var_buf[node]; float rstd = 1.0 / (sqrt(var) + 1.0e-7); float const *x_ptr = &x_buf[frame_stride * node]; float *y_ptr = &y_buf[frame_stride * node]; for ( int frame = id; frame < frame_size; frame += id_step ) { float x = x_ptr[frame]; y_ptr[frame] = ((x - mean) * rstd) * gamma + beta; } } BBCU_DLL_EXPORT int bbcu_fp32_BatchNormalization_ForwardInference ( float const *dev_x_buf, float *dev_y_buf, float const *dev_gamma_buf, float const *dev_beta_buf, float const *dev_running_mean_buf, float const *dev_running_var_buf, int node_size, int frame_size, int frame_stride, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); unsigned int const THREAD_SIZE = 1024; unsigned int const MAX_FRAME_UNIT = 1024; unsigned int const MAX_NODE_UNIT = 1024; #if 1 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= node_size ) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; } #endif block.x = std::min(block.x, MAX_FRAME_UNIT); block.y = std::min(block.y, MAX_NODE_UNIT); dim3 grid(1, (node_size + (block.y - 1)) / block.y); kernal_fp32_BatchNormalization_ForwardInference<<<grid, block, 0, streamId>>> ( dev_x_buf, dev_y_buf, dev_gamma_buf, dev_beta_buf, dev_running_mean_buf, dev_running_var_buf, node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } ////////////////////////////// // Backward ////////////////////////////// __global__ void kernal_fp32_BatchNormalization_Backward ( float const *x_buf, float const *dy_buf, float *dx_buf, float const *gamma_buf, float *dgamma_buf, float *dbeta_buf, float const *mean_buf, float const *rstd_buf, float reciprocal_frame_size, int frame_size, int frame_stride ) { __shared__ float buf[BBCU_BATCHNORM_BW_BLOCK_SIZE]; // 初期化 int const node = blockIdx.x; int const id = threadIdx.x; int const id_step 
= blockDim.x; float mean = mean_buf[node]; float rstd = rstd_buf[node]; float gamma = gamma_buf[node]; float dgamma = 0; float dbeta = 0; float dmeanx = 0; float dstd = 0; float dgamma_prev; float dbeta_prev; if (id == 0) { dgamma_prev = dgamma_buf[node]; dbeta_prev = dbeta_buf[node]; } float rstd2 = rstd * rstd; float const * const x_ptr = &x_buf[node * frame_stride]; float const * const dy_ptr = &dy_buf[node * frame_stride]; for ( int frame = id; frame < frame_size; frame += id_step) { float x = x_ptr[frame]; float dy = dy_ptr[frame]; float xc = x - mean; float xn = xc * rstd; dbeta += dy; dgamma += xn * dy; float dxn = gamma * dy; dstd += -(dxn * xc * rstd2); dmeanx += -(dxn * rstd); } dbeta = device_fp32_LocalSum(dbeta, buf); dgamma = device_fp32_LocalSum(dgamma, buf); if (id == 0) { dgamma_buf[node] = dgamma + dgamma_prev; dbeta_buf[node] = dbeta + dbeta_prev; } dstd = device_fp32_LocalSum(dstd, buf); dmeanx = device_fp32_LocalSum(dmeanx, buf); float * const dx_ptr = &dx_buf[node * frame_stride]; float dvar = dstd * rstd; float dmean = (dmeanx - (mean * dvar)) * reciprocal_frame_size; for ( int frame = id; frame < frame_size; frame += id_step) { float dy = dy_ptr[frame]; float x = x_ptr[frame]; float dxn = dy * gamma; float dxc = dxn * rstd; float dx = dxc + dmean + (x * dvar * reciprocal_frame_size); dx_ptr[frame] = dx; } } BBCU_DLL_EXPORT int bbcu_fp32_BatchNormalization_Backward ( const float *dev_x_buf, const float *dev_dy_buf, float *dev_dx_buf, float const *dev_gamma_buf, float *dev_dgamma_buf, float *dev_dbeta_buf, float const *dev_mean_buf, float const *dev_rstd_buf, float reciprocal_frame_size, int node_size, int frame_size, int frame_stride, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); dim3 grid(node_size); dim3 block(BBCU_BATCHNORM_BW_BLOCK_SIZE); kernal_fp32_BatchNormalization_Backward << <grid, block, 0, streamId >> > ( dev_x_buf, dev_dy_buf, dev_dx_buf, dev_gamma_buf, dev_dgamma_buf, dev_dbeta_buf, dev_mean_buf, dev_rstd_buf, reciprocal_frame_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; }
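// The per-thread s1/c1 and s2/c2 accumulators in
// kernal_fp32_BatchNormalization_ForwardTraining implement Kahan (compensated)
// summation for the sum and the sum of squares. A minimal host-side sketch of
// the same recurrence, added only to make the roles of s, c, y and t explicit
// (not part of the bbcu API):
#include <vector>

static float kahan_sum_sketch(const std::vector<float>& xs)
{
    float s = 0.0f;   // running sum
    float c = 0.0f;   // running compensation for lost low-order bits
    for (float x : xs) {
        float y = x - c;   // re-apply the previously lost bits
        float t = s + y;   // s is large, y small: low bits of y are lost here
        c = (t - s) - y;   // recover exactly what was lost
        s = t;             // carry the compensated sum forward
    }
    return s;
}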
978279daf7e350b32706efa7666113aef289fd72.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <algorithm> #include <cmath> #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/linalg/add.cuh> #include <raft/linalg/unary_op.cuh> #include <raft/random/rng.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <vector> namespace raft { namespace random { /* In this test we generate pseudo-random integers following a probability distribution defined by * an array of weights, such that the probability of the integer i is p_i=w_i/sum(w). A histogram of * the generated integers is compared to the expected probabilities. The histogram is normalized, * i.e divided by the number of drawn integers n=sampled_len*n_repeat. The expected value for the * index i of the histogram is E_i=p_i, the standard deviation sigma_i=sqrt(p_i*(1-p_i)/n). * * Weights are constructed as a sparse vector containing mostly zeros and a small number of non-zero * values. The test tolerance used to compare the actual and expected histograms is * eps=max(sigma_i). For the test to be relevant, the tolerance must be small w.r.t the non-zero * probabilities. Hence, n_repeat, sampled_len and nnz must be chosen accordingly. The test * automatically computes the tolerance and will fail if it is estimated too high for the test to be * relevant. 
*/ template <typename IdxT> struct RngDiscreteInputs { IdxT n_repeat; IdxT sampled_len; IdxT len; IdxT nnz; GeneratorType gtype; unsigned long long int seed; }; template <typename WeightT, typename IdxT> ::std::ostream& operator<<(::std::ostream& os, const RngDiscreteInputs<IdxT>& d) { return os << "{" << d.n_repeat << ", " << d.sampled_len << ", " << d.len << ", " << d.nnz << "}"; } template <typename LabelT, typename IdxT> void update_count( const LabelT* labels, IdxT* count, IdxT sampled_len, IdxT len, const hipStream_t& stream) { IdxT num_levels = len + 1; IdxT lower_level = 0; IdxT upper_level = len; rmm::device_uvector<IdxT> temp_count(len, stream); size_t temp_storage_bytes = 0; RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(nullptr, temp_storage_bytes, labels, temp_count.data(), num_levels, lower_level, upper_level, sampled_len, stream)); rmm::device_uvector<char> workspace(temp_storage_bytes, stream); RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(workspace.data(), temp_storage_bytes, labels, temp_count.data(), num_levels, lower_level, upper_level, sampled_len, stream)); raft::linalg::add(count, count, temp_count.data(), len, stream); } template <typename IdxT> void normalize_count( float* histogram, const IdxT* count, float scale, IdxT len, const hipStream_t& stream) { raft::linalg::unaryOp( histogram, count, len, [scale] __device__(const IdxT& cnt) { return static_cast<float>(cnt) / scale; }, stream); } template <typename OutT, typename WeightT, typename IdxT> class RngDiscreteTest : public ::testing::TestWithParam<RngDiscreteInputs<IdxT>> { public: RngDiscreteTest() : params(::testing::TestWithParam<RngDiscreteInputs<IdxT>>::GetParam()), stream(resource::get_cuda_stream(handle)), out(params.sampled_len, stream), weights(params.len, stream), histogram(params.len, stream), exp_histogram(params.len) { } protected: void SetUp() override { tolerance = 0.0f; std::vector<WeightT> h_weights(params.len, WeightT{0}); std::mt19937 gen(params.seed); std::uniform_real_distribution dis(WeightT{0.2}, WeightT{2.0}); WeightT total_weight = WeightT{0}; for (int i = 0; i < params.nnz; i++) { h_weights[i] = dis(gen); total_weight += h_weights[i]; } float min_p = 1.f; for (int i = 0; i < params.nnz; i++) { float p = static_cast<float>(h_weights[i] / total_weight); float n = static_cast<float>(params.n_repeat * params.sampled_len); float sigma = std::sqrt(p * (1.f - p) / n); tolerance = ::max(tolerance, 4.f * sigma); min_p = ::min(min_p, p); } EXPECT_TRUE(tolerance < 0.5f * min_p) << "Test tolerance (" << tolerance << ") is too high. 
Use more samples, more " "repetitions or less non-zero weights."; std::shuffle(h_weights.begin(), h_weights.end(), gen); raft::copy(weights.data(), h_weights.data(), params.len, stream); RngState r(params.seed, params.gtype); raft::device_vector_view<OutT, IdxT> out_view(out.data(), out.size()); auto weights_view = raft::make_device_vector_view<const WeightT, IdxT>(weights.data(), weights.size()); rmm::device_uvector<IdxT> count(params.len, stream); RAFT_CUDA_TRY(hipMemsetAsync(count.data(), 0, params.len * sizeof(IdxT), stream)); for (int iter = 0; iter < params.n_repeat; iter++) { discrete(handle, r, out_view, weights_view); update_count(out.data(), count.data(), params.sampled_len, params.len, stream); } float scale = static_cast<float>(params.sampled_len * params.n_repeat); normalize_count(histogram.data(), count.data(), scale, params.len, stream); // Compute the expected normalized histogram for (IdxT i = 0; i < params.len; i++) { exp_histogram[i] = h_weights[i] / total_weight; } } protected: raft::resources handle; hipStream_t stream; RngDiscreteInputs<IdxT> params; float tolerance; rmm::device_uvector<OutT> out; rmm::device_uvector<WeightT> weights; rmm::device_uvector<float> histogram; std::vector<float> exp_histogram; }; const std::vector<RngDiscreteInputs<int>> inputs_i32 = { {1, 10000, 5, 5, GenPC, 123ULL}, {1, 10000, 10, 7, GenPC, 456ULL}, {1000, 100, 10000, 20, GenPC, 123ULL}, {1, 10000, 5, 5, GenPhilox, 1234ULL}, }; const std::vector<RngDiscreteInputs<int64_t>> inputs_i64 = { {1, 10000, 5, 5, GenPC, 123ULL}, {1, 10000, 10, 7, GenPC, 456ULL}, {1000, 100, 10000, 20, GenPC, 123ULL}, {1, 10000, 5, 5, GenPhilox, 1234ULL}, }; #define RNG_DISCRETE_TEST(test_type, test_name, test_inputs) \ typedef RAFT_DEPAREN(test_type) test_name; \ TEST_P(test_name, Result) \ { \ ASSERT_TRUE(devArrMatchHost(exp_histogram.data(), \ histogram.data(), \ exp_histogram.size(), \ CompareApprox<float>(tolerance), \ stream)); \ } \ INSTANTIATE_TEST_CASE_P(ReduceTests, test_name, ::testing::ValuesIn(test_inputs)) RNG_DISCRETE_TEST((RngDiscreteTest<int, float, int>), RngDiscreteTestI32FI32, inputs_i32); RNG_DISCRETE_TEST((RngDiscreteTest<uint32_t, float, int>), RngDiscreteTestU32FI32, inputs_i32); RNG_DISCRETE_TEST((RngDiscreteTest<int64_t, float, int>), RngDiscreteTestI64FI32, inputs_i32); RNG_DISCRETE_TEST((RngDiscreteTest<int, double, int>), RngDiscreteTestI32DI32, inputs_i32); // Disable IdxT=int64_t test due to CUB error: https://github.com/NVIDIA/cub/issues/192 // RNG_DISCRETE_TEST((RngDiscreteTest<int, float, int64_t>), RngDiscreteTestI32FI64, inputs_i64); } // namespace random } // namespace raft
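// Worked example of the tolerance logic in SetUp() above: with
// n = n_repeat * sampled_len draws and a category of probability p, one bin of
// the normalized histogram has standard deviation sigma = sqrt(p*(1-p)/n); the
// test uses eps = max_i 4*sigma_i and requires eps < 0.5 * min_i p_i. For
// instance p = 0.2 and n = 10000 give sigma = sqrt(0.2*0.8/10000) ~= 0.004 and
// eps ~= 0.016. Hypothetical helper illustrating the formula (uses std::sqrt
// from <cmath>, already included above; not part of the test):
inline float discrete_tolerance_sketch(float p, float n)
{
  return 4.0f * std::sqrt(p * (1.0f - p) / n);
}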
978279daf7e350b32706efa7666113aef289fd72.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <algorithm> #include <cmath> #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/linalg/add.cuh> #include <raft/linalg/unary_op.cuh> #include <raft/random/rng.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <vector> namespace raft { namespace random { /* In this test we generate pseudo-random integers following a probability distribution defined by * an array of weights, such that the probability of the integer i is p_i=w_i/sum(w). A histogram of * the generated integers is compared to the expected probabilities. The histogram is normalized, * i.e divided by the number of drawn integers n=sampled_len*n_repeat. The expected value for the * index i of the histogram is E_i=p_i, the standard deviation sigma_i=sqrt(p_i*(1-p_i)/n). * * Weights are constructed as a sparse vector containing mostly zeros and a small number of non-zero * values. The test tolerance used to compare the actual and expected histograms is * eps=max(sigma_i). For the test to be relevant, the tolerance must be small w.r.t the non-zero * probabilities. Hence, n_repeat, sampled_len and nnz must be chosen accordingly. The test * automatically computes the tolerance and will fail if it is estimated too high for the test to be * relevant. 
*/ template <typename IdxT> struct RngDiscreteInputs { IdxT n_repeat; IdxT sampled_len; IdxT len; IdxT nnz; GeneratorType gtype; unsigned long long int seed; }; template <typename WeightT, typename IdxT> ::std::ostream& operator<<(::std::ostream& os, const RngDiscreteInputs<IdxT>& d) { return os << "{" << d.n_repeat << ", " << d.sampled_len << ", " << d.len << ", " << d.nnz << "}"; } template <typename LabelT, typename IdxT> void update_count( const LabelT* labels, IdxT* count, IdxT sampled_len, IdxT len, const cudaStream_t& stream) { IdxT num_levels = len + 1; IdxT lower_level = 0; IdxT upper_level = len; rmm::device_uvector<IdxT> temp_count(len, stream); size_t temp_storage_bytes = 0; RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(nullptr, temp_storage_bytes, labels, temp_count.data(), num_levels, lower_level, upper_level, sampled_len, stream)); rmm::device_uvector<char> workspace(temp_storage_bytes, stream); RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(workspace.data(), temp_storage_bytes, labels, temp_count.data(), num_levels, lower_level, upper_level, sampled_len, stream)); raft::linalg::add(count, count, temp_count.data(), len, stream); } template <typename IdxT> void normalize_count( float* histogram, const IdxT* count, float scale, IdxT len, const cudaStream_t& stream) { raft::linalg::unaryOp( histogram, count, len, [scale] __device__(const IdxT& cnt) { return static_cast<float>(cnt) / scale; }, stream); } template <typename OutT, typename WeightT, typename IdxT> class RngDiscreteTest : public ::testing::TestWithParam<RngDiscreteInputs<IdxT>> { public: RngDiscreteTest() : params(::testing::TestWithParam<RngDiscreteInputs<IdxT>>::GetParam()), stream(resource::get_cuda_stream(handle)), out(params.sampled_len, stream), weights(params.len, stream), histogram(params.len, stream), exp_histogram(params.len) { } protected: void SetUp() override { tolerance = 0.0f; std::vector<WeightT> h_weights(params.len, WeightT{0}); std::mt19937 gen(params.seed); std::uniform_real_distribution dis(WeightT{0.2}, WeightT{2.0}); WeightT total_weight = WeightT{0}; for (int i = 0; i < params.nnz; i++) { h_weights[i] = dis(gen); total_weight += h_weights[i]; } float min_p = 1.f; for (int i = 0; i < params.nnz; i++) { float p = static_cast<float>(h_weights[i] / total_weight); float n = static_cast<float>(params.n_repeat * params.sampled_len); float sigma = std::sqrt(p * (1.f - p) / n); tolerance = std::max(tolerance, 4.f * sigma); min_p = std::min(min_p, p); } EXPECT_TRUE(tolerance < 0.5f * min_p) << "Test tolerance (" << tolerance << ") is too high. 
Use more samples, more " "repetitions or less non-zero weights."; std::shuffle(h_weights.begin(), h_weights.end(), gen); raft::copy(weights.data(), h_weights.data(), params.len, stream); RngState r(params.seed, params.gtype); raft::device_vector_view<OutT, IdxT> out_view(out.data(), out.size()); auto weights_view = raft::make_device_vector_view<const WeightT, IdxT>(weights.data(), weights.size()); rmm::device_uvector<IdxT> count(params.len, stream); RAFT_CUDA_TRY(cudaMemsetAsync(count.data(), 0, params.len * sizeof(IdxT), stream)); for (int iter = 0; iter < params.n_repeat; iter++) { discrete(handle, r, out_view, weights_view); update_count(out.data(), count.data(), params.sampled_len, params.len, stream); } float scale = static_cast<float>(params.sampled_len * params.n_repeat); normalize_count(histogram.data(), count.data(), scale, params.len, stream); // Compute the expected normalized histogram for (IdxT i = 0; i < params.len; i++) { exp_histogram[i] = h_weights[i] / total_weight; } } protected: raft::resources handle; cudaStream_t stream; RngDiscreteInputs<IdxT> params; float tolerance; rmm::device_uvector<OutT> out; rmm::device_uvector<WeightT> weights; rmm::device_uvector<float> histogram; std::vector<float> exp_histogram; }; const std::vector<RngDiscreteInputs<int>> inputs_i32 = { {1, 10000, 5, 5, GenPC, 123ULL}, {1, 10000, 10, 7, GenPC, 456ULL}, {1000, 100, 10000, 20, GenPC, 123ULL}, {1, 10000, 5, 5, GenPhilox, 1234ULL}, }; const std::vector<RngDiscreteInputs<int64_t>> inputs_i64 = { {1, 10000, 5, 5, GenPC, 123ULL}, {1, 10000, 10, 7, GenPC, 456ULL}, {1000, 100, 10000, 20, GenPC, 123ULL}, {1, 10000, 5, 5, GenPhilox, 1234ULL}, }; #define RNG_DISCRETE_TEST(test_type, test_name, test_inputs) \ typedef RAFT_DEPAREN(test_type) test_name; \ TEST_P(test_name, Result) \ { \ ASSERT_TRUE(devArrMatchHost(exp_histogram.data(), \ histogram.data(), \ exp_histogram.size(), \ CompareApprox<float>(tolerance), \ stream)); \ } \ INSTANTIATE_TEST_CASE_P(ReduceTests, test_name, ::testing::ValuesIn(test_inputs)) RNG_DISCRETE_TEST((RngDiscreteTest<int, float, int>), RngDiscreteTestI32FI32, inputs_i32); RNG_DISCRETE_TEST((RngDiscreteTest<uint32_t, float, int>), RngDiscreteTestU32FI32, inputs_i32); RNG_DISCRETE_TEST((RngDiscreteTest<int64_t, float, int>), RngDiscreteTestI64FI32, inputs_i32); RNG_DISCRETE_TEST((RngDiscreteTest<int, double, int>), RngDiscreteTestI32DI32, inputs_i32); // Disable IdxT=int64_t test due to CUB error: https://github.com/NVIDIA/cub/issues/192 // RNG_DISCRETE_TEST((RngDiscreteTest<int, float, int64_t>), RngDiscreteTestI32FI64, inputs_i64); } // namespace random } // namespace raft
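// Two details worth noting in the helpers above:
//  * update_count() follows the standard two-phase CUB calling convention:
//    cub::DeviceHistogram::HistogramEven is first called with a null
//    temporary-storage pointer purely to obtain temp_storage_bytes, and then
//    called again with the allocated workspace to compute the histogram.
//  * normalize_count() passes a [scale] __device__ lambda defined in host code
//    to raft::linalg::unaryOp; nvcc only accepts such lambdas when
//    extended-lambda support is enabled, which RAFT's build is expected to
//    turn on.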
b0912d741e198a9002155c971d137402bfa7775e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <float.h> #include <math.h> #include <thrust/tuple.h> #include <cstdio> #include <tuple> #include "rasterize_points/bitmask.cuh" #include "rasterize_points/rasterization_utils.cuh" #include "utils/float_math.cuh" #include "utils/geometry_utils.cuh" namespace { // A structure for holding details about a pixel. struct Pixel { float z; int64_t idx; float dist; float3 bary; }; __device__ bool operator<(const Pixel& a, const Pixel& b) { return a.z < b.z; } __device__ float FloatMin3(const float p1, const float p2, const float p3) { return fminf(p1, fminf(p2, p3)); } __device__ float FloatMax3(const float p1, const float p2, const float p3) { return fmaxf(p1, fmaxf(p2, p3)); } // Get the xyz coordinates of the three vertices for the face given by the // index face_idx into face_verts. __device__ thrust::tuple<float3, float3, float3> GetSingleFaceVerts( const float* face_verts, int face_idx) { const float x0 = face_verts[face_idx * 9 + 0]; const float y0 = face_verts[face_idx * 9 + 1]; const float z0 = face_verts[face_idx * 9 + 2]; const float x1 = face_verts[face_idx * 9 + 3]; const float y1 = face_verts[face_idx * 9 + 4]; const float z1 = face_verts[face_idx * 9 + 5]; const float x2 = face_verts[face_idx * 9 + 6]; const float y2 = face_verts[face_idx * 9 + 7]; const float z2 = face_verts[face_idx * 9 + 8]; const float3 v0xyz = make_float3(x0, y0, z0); const float3 v1xyz = make_float3(x1, y1, z1); const float3 v2xyz = make_float3(x2, y2, z2); return thrust::make_tuple(v0xyz, v1xyz, v2xyz); } // Get the min/max x/y/z values for the face given by vertices v0, v1, v2. __device__ thrust::tuple<float2, float2, float2> GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) { const float xmin = FloatMin3(v0.x, v1.x, v2.x); const float ymin = FloatMin3(v0.y, v1.y, v2.y); const float zmin = FloatMin3(v0.z, v1.z, v2.z); const float xmax = FloatMax3(v0.x, v1.x, v2.x); const float ymax = FloatMax3(v0.y, v1.y, v2.y); const float zmax = FloatMax3(v0.z, v1.z, v2.z); return thrust::make_tuple( make_float2(xmin, xmax), make_float2(ymin, ymax), make_float2(zmin, zmax)); } // Check if the point (px, py) lies outside the face bounding box face_bbox. // Return true if the point is outside. __device__ bool CheckPointOutsideBoundingBox( float3 v0, float3 v1, float3 v2, float blur_radius, float2 pxy) { const auto bbox = GetFaceBoundingBox(v0, v1, v2); const float2 xlims = thrust::get<0>(bbox); const float2 ylims = thrust::get<1>(bbox); const float2 zlims = thrust::get<2>(bbox); const float x_min = xlims.x - blur_radius; const float y_min = ylims.x - blur_radius; const float x_max = xlims.y + blur_radius; const float y_max = ylims.y + blur_radius; // Faces with at least one vertex behind the camera won't render correctly // and should be removed or clipped before calling the rasterizer const bool z_invalid = zlims.x < kEpsilon; // Check if the current point is oustside the triangle bounding box. return ( pxy.x > x_max || pxy.x < x_min || pxy.y > y_max || pxy.y < y_min || z_invalid); } // This function checks if a pixel given by xy location pxy lies within the // face with index face_idx in face_verts. 
One of the inputs is a list (q) // which contains Pixel structs with the indices of the faces which intersect // with this pixel sorted by closest z distance. If the point pxy lies in the // face, the list (q) is updated and re-orderered in place. In addition // the auxillary variables q_size, q_max_z and q_max_idx are also modified. // This code is shared between RasterizeMeshesNaiveCudaKernel and // RasterizeMeshesFineCudaKernel. template <typename FaceQ> __device__ void CheckPixelInsideFace( const float* face_verts, // (F, 3, 3) const int face_idx, int& q_size, float& q_max_z, int& q_max_idx, FaceQ& q, const float blur_radius, const float2 pxy, // Coordinates of the pixel const int K, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { const auto v012 = GetSingleFaceVerts(face_verts, face_idx); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only need xy for barycentric coordinates and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Perform checks and skip if: // 1. the face is behind the camera // 2. the face is facing away from the camera // 3. the face has very small face area // 4. the pixel is outside the face bbox const float zmax = FloatMax3(v0.z, v1.z, v2.z); const bool outside_bbox = CheckPointOutsideBoundingBox( v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy); // Check if the face is visible to the camera. const bool back_face = face_area < 0.0; const bool zero_face_area = face_area <= kEpsilon && face_area >= -1.0f * kEpsilon; if (zmax < 0 || cull_backfaces && back_face || outside_bbox || zero_face_area) return; // Calculate barycentric coords and euclidean dist to triangle. const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 p_bary = !perspective_correct ? p_bary0 : BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z); const float3 p_bary_clip = !clip_barycentric_coords ? p_bary : BarycentricClipForward(p_bary); const float pz = p_bary_clip.x * v0.z + p_bary_clip.y * v1.z + p_bary_clip.z * v2.z; if (pz < 0) return; // Face is behind the image plane. // Get abs squared distance const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy); // Use the unclipped bary coordinates to determine if the point is inside the // face. const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f; const float signed_dist = inside ? -dist : dist; // Check if pixel is outside blur region if (!inside && dist >= blur_radius) { return; } if (q_size < K) { // Just insert it. q[q_size] = {pz, face_idx, signed_dist, p_bary_clip}; if (pz > q_max_z) { q_max_z = pz; q_max_idx = q_size; } q_size++; } else if (pz < q_max_z) { // Overwrite the old max, and find the new max. 
q[q_max_idx] = {pz, face_idx, signed_dist, p_bary_clip}; q_max_z = pz; for (int i = 0; i < K; i++) { if (q[i].z > q_max_z) { q_max_z = q[i].z; q_max_idx = i; } } } } } // namespace // **************************************************************************** // * NAIVE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesNaiveCudaKernel( const float* face_verts, const int64_t* mesh_to_face_first_idx, const int64_t* num_faces_per_mesh, const float blur_radius, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces, const int N, const int H, const int W, const int K, int64_t* face_idxs, float* zbuf, float* pix_dists, float* bary) { // Simple version: One thread per output pixel int num_threads = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i = tid; i < N * H * W; i += num_threads) { // Convert linear index to 3D index const int n = i / (H * W); // batch index. const int pix_idx = i % (H * W); // Reverse ordering of X and Y axes const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; // screen coordinates to ndc coordiantes of pixel. const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); const float2 pxy = make_float2(xf, yf); // For keeping track of the K closest points we want a data structure // that (1) gives O(1) access to the closest point for easy comparisons, // and (2) allows insertion of new elements. In the CPU version we use // std::priority_queue; then (2) is O(log K). We can't use STL // containers in CUDA; we could roll our own max heap in an array, but // that would likely have a lot of warp divergence so we do something // simpler instead: keep the elements in an unsorted array, but keep // track of the max value and the index of the max value. Then (1) is // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8 // this should be fast enough for our purposes. Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; // Using the batch index of the thread get the start and stop // indices for the faces. const int64_t face_start_idx = mesh_to_face_first_idx[n]; const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n]; // Loop through the faces in the mesh. for (int f = face_start_idx; f < face_stop_idx; ++f) { // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. CheckPixelInsideFace( face_verts, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct, clip_barycentric_coords, cull_backfaces); } // TODO: make sorting an option as only top k is needed, not sorted values. 
BubbleSort(q, q_size); int idx = n * H * W * K + pix_idx * K; for (int k = 0; k < q_size; ++k) { face_idxs[idx + k] = q[k].idx; zbuf[idx + k] = q[k].z; pix_dists[idx + k] = q[k].dist; bary[(idx + k) * 3 + 0] = q[k].bary.x; bary[(idx + k) * 3 + 1] = q[k].bary.y; bary[(idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> RasterizeMeshesNaiveCuda( const at::Tensor& face_verts, const at::Tensor& mesh_to_faces_packed_first_idx, const at::Tensor& num_faces_per_mesh, const std::tuple<int, int> image_size, const float blur_radius, const int num_closest, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); TORCH_CHECK( num_faces_per_mesh.size(0) == mesh_to_faces_packed_first_idx.size(0), "num_faces_per_mesh must have save size first dimension as mesh_to_faces_packed_first_idx"); if (num_closest > kMaxPointsPerPixel) { std::stringstream ss; ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel; AT_ERROR(ss.str()); } // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, mesh_to_faces_packed_first_idx_t{ mesh_to_faces_packed_first_idx, "mesh_to_faces_packed_first_idx", 2}, num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3}; at::CheckedFrom c = "RasterizeMeshesNaiveCuda"; at::checkAllSameGPU( c, {face_verts_t, mesh_to_faces_packed_first_idx_t, num_faces_per_mesh_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const int N = num_faces_per_mesh.size(0); // batch size. const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); const int K = num_closest; auto long_opts = num_faces_per_mesh.options().dtype(at::kLong); auto float_opts = face_verts.options().dtype(at::kFloat); at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts); if (face_idxs.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizeMeshesNaiveCudaKernel), dim3(blocks), dim3(threads), 0, stream, face_verts.contiguous().data_ptr<float>(), mesh_to_faces_packed_first_idx.contiguous().data_ptr<int64_t>(), num_faces_per_mesh.contiguous().data_ptr<int64_t>(), blur_radius, perspective_correct, clip_barycentric_coords, cull_backfaces, N, H, W, K, face_idxs.data_ptr<int64_t>(), zbuf.data_ptr<float>(), pix_dists.data_ptr<float>(), bary.data_ptr<float>()); AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } // **************************************************************************** // * BACKWARD PASS * // **************************************************************************** // TODO: benchmark parallelizing over faces_verts instead of over pixels. 
__global__ void RasterizeMeshesBackwardCudaKernel( const float* face_verts, // (F, 3, 3) const int64_t* pix_to_face, // (N, H, W, K) const bool perspective_correct, const bool clip_barycentric_coords, const int N, const int H, const int W, const int K, const float* grad_zbuf, // (N, H, W, K) const float* grad_bary, // (N, H, W, K, 3) const float* grad_dists, // (N, H, W, K) float* grad_face_verts) { // (F, 3, 3) // Parallelize over each pixel in images of // size H * W, for each image in the batch of size N. const int num_threads = gridDim.x * blockDim.x; const int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int t_i = tid; t_i < N * H * W; t_i += num_threads) { // Convert linear index to 3D index const int n = t_i / (H * W); // batch index. const int pix_idx = t_i % (H * W); // Reverse ordering of X and Y axes. const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); const float2 pxy = make_float2(xf, yf); // Loop over all the faces for this pixel. for (int k = 0; k < K; k++) { // Index into (N, H, W, K, :) grad tensors // pixel index + top k index int i = n * H * W * K + pix_idx * K + k; const int f = pix_to_face[i]; if (f < 0) { continue; // padded face. } // Get xyz coordinates of the three face vertices. const auto v012 = GetSingleFaceVerts(face_verts, f); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only neex xy for barycentric coordinate and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Get upstream gradients for the face. const float grad_dist_upstream = grad_dists[i]; const float grad_zbuf_upstream = grad_zbuf[i]; const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0]; const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1]; const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2]; const float3 grad_bary_upstream = make_float3( grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2); const float3 b_w = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 b_pp = !perspective_correct ? b_w : BarycentricPerspectiveCorrectionForward(b_w, v0.z, v1.z, v2.z); const float3 b_w_clip = !clip_barycentric_coords ? b_pp : BarycentricClipForward(b_pp); const bool inside = b_pp.x > 0.0f && b_pp.y > 0.0f && b_pp.z > 0.0f; const float sign = inside ? -1.0f : 1.0f; auto grad_dist_f = PointTriangleDistanceBackward( pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream); const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f); const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f); const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f); // Upstream gradient for barycentric coords from zbuf calculation: // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2 // Therefore // d_zbuf/d_bary_w0 = z0 // d_zbuf/d_bary_w1 = z1 // d_zbuf/d_bary_w2 = z2 const float3 d_zbuf_d_bwclip = make_float3(v0.z, v1.z, v2.z); // Total upstream barycentric gradients are the sum of // external upstream gradients and contribution from zbuf. 
const float3 grad_bary_f_sum = (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bwclip); float3 grad_bary0 = grad_bary_f_sum; if (clip_barycentric_coords) { grad_bary0 = BarycentricClipBackward(b_w, grad_bary_f_sum); } float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f; if (perspective_correct) { auto perspective_grads = BarycentricPerspectiveCorrectionBackward( b_w, v0.z, v1.z, v2.z, grad_bary0); grad_bary0 = thrust::get<0>(perspective_grads); dz0_persp = thrust::get<1>(perspective_grads); dz1_persp = thrust::get<2>(perspective_grads); dz2_persp = thrust::get<3>(perspective_grads); } auto grad_bary_f = BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0); const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f); const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f); const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f); atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x); atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y); atomicAdd( grad_face_verts + f * 9 + 2, grad_zbuf_upstream * b_w_clip.x + dz0_persp); atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x); atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y); atomicAdd( grad_face_verts + f * 9 + 5, grad_zbuf_upstream * b_w_clip.y + dz1_persp); atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x); atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y); atomicAdd( grad_face_verts + f * 9 + 8, grad_zbuf_upstream * b_w_clip.z + dz2_persp); } } } at::Tensor RasterizeMeshesBackwardCuda( const at::Tensor& face_verts, // (F, 3, 3) const at::Tensor& pix_to_face, // (N, H, W, K) const at::Tensor& grad_zbuf, // (N, H, W, K) const at::Tensor& grad_bary, // (N, H, W, K, 3) const at::Tensor& grad_dists, // (N, H, W, K) const bool perspective_correct, const bool clip_barycentric_coords) { // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, pix_to_face_t{pix_to_face, "pix_to_face", 2}, grad_zbuf_t{grad_zbuf, "grad_zbuf", 3}, grad_bary_t{grad_bary, "grad_bary", 4}, grad_dists_t{grad_dists, "grad_dists", 5}; at::CheckedFrom c = "RasterizeMeshesBackwardCuda"; at::checkAllSameGPU( c, {face_verts_t, pix_to_face_t, grad_zbuf_t, grad_bary_t, grad_dists_t}); at::checkAllSameType( c, {face_verts_t, grad_zbuf_t, grad_bary_t, grad_dists_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const int F = face_verts.size(0); const int N = pix_to_face.size(0); const int H = pix_to_face.size(1); const int W = pix_to_face.size(2); const int K = pix_to_face.size(3); at::Tensor grad_face_verts = at::zeros({F, 3, 3}, face_verts.options()); if (grad_face_verts.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return grad_face_verts; } const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizeMeshesBackwardCudaKernel), dim3(blocks), dim3(threads), 0, stream, face_verts.contiguous().data_ptr<float>(), pix_to_face.contiguous().data_ptr<int64_t>(), perspective_correct, clip_barycentric_coords, N, H, W, K, grad_zbuf.contiguous().data_ptr<float>(), grad_bary.contiguous().data_ptr<float>(), grad_dists.contiguous().data_ptr<float>(), grad_face_verts.data_ptr<float>()); AT_CUDA_CHECK(hipGetLastError()); return grad_face_verts; } // **************************************************************************** // * COARSE RASTERIZATION * // 
**************************************************************************** __global__ void RasterizeMeshesCoarseCudaKernel( const float* face_verts, const int64_t* mesh_to_face_first_idx, const int64_t* num_faces_per_mesh, const float blur_radius, const int N, const int F, const int H, const int W, const int bin_size, const int chunk_size, const int max_faces_per_bin, int* faces_per_bin, int* bin_faces) { extern __shared__ char sbuf[]; const int M = max_faces_per_bin; // Integer divide round up const int num_bins_x = 1 + (W - 1) / bin_size; const int num_bins_y = 1 + (H - 1) / bin_size; // NDC range depends on the ratio of W/H // The shorter side from (H, W) is given an NDC range of 2.0 and // the other side is scaled by the ratio of H:W. const float NDC_x_half_range = NonSquareNdcRange(W, H) / 2.0f; const float NDC_y_half_range = NonSquareNdcRange(H, W) / 2.0f; // Size of half a pixel in NDC units is the NDC half range // divided by the corresponding image dimension const float half_pix_x = NDC_x_half_range / W; const float half_pix_y = NDC_y_half_range / H; // This is a boolean array of shape (num_bins_y, num_bins_x, chunk_size) // stored in shared memory that will track whether each point in the chunk // falls into each bin of the image. BitMask binmask((unsigned int*)sbuf, num_bins_y, num_bins_x, chunk_size); // Have each block handle a chunk of faces const int chunks_per_batch = 1 + (F - 1) / chunk_size; const int num_chunks = N * chunks_per_batch; for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) { const int batch_idx = chunk / chunks_per_batch; // batch index const int chunk_idx = chunk % chunks_per_batch; const int face_start_idx = chunk_idx * chunk_size; binmask.block_clear(); const int64_t mesh_face_start_idx = mesh_to_face_first_idx[batch_idx]; const int64_t mesh_face_stop_idx = mesh_face_start_idx + num_faces_per_mesh[batch_idx]; // Have each thread handle a different face within the chunk for (int f = threadIdx.x; f < chunk_size; f += blockDim.x) { const int f_idx = face_start_idx + f; // Check if face index corresponds to the mesh in the batch given by // batch_idx if (f_idx >= mesh_face_stop_idx || f_idx < mesh_face_start_idx) { continue; } // Get xyz coordinates of the three face vertices. const auto v012 = GetSingleFaceVerts(face_verts, f_idx); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Compute screen-space bbox for the triangle expanded by blur. float xmin = FloatMin3(v0.x, v1.x, v2.x) - sqrt(blur_radius); float ymin = FloatMin3(v0.y, v1.y, v2.y) - sqrt(blur_radius); float xmax = FloatMax3(v0.x, v1.x, v2.x) + sqrt(blur_radius); float ymax = FloatMax3(v0.y, v1.y, v2.y) + sqrt(blur_radius); float zmin = FloatMin3(v0.z, v1.z, v2.z); // Faces with at least one vertex behind the camera won't render // correctly and should be removed or clipped before calling the // rasterizer if (zmin < kEpsilon) { continue; } // Brute-force search over all bins; TODO(T54294966) something smarter. for (int by = 0; by < num_bins_y; ++by) { // Y coordinate of the top and bottom of the bin. // PixToNdc gives the location of the center of each pixel, so we // need to add/subtract a half pixel to get the true extent of the bin. // Reverse ordering of Y axis so that +Y is upwards in the image. 
const float bin_y_min = PixToNonSquareNdc(by * bin_size, H, W) - half_pix_y; const float bin_y_max = PixToNonSquareNdc((by + 1) * bin_size - 1, H, W) + half_pix_y; const bool y_overlap = (ymin <= bin_y_max) && (bin_y_min < ymax); for (int bx = 0; bx < num_bins_x; ++bx) { // X coordinate of the left and right of the bin. // Reverse ordering of x axis so that +X is left. const float bin_x_max = PixToNonSquareNdc((bx + 1) * bin_size - 1, W, H) + half_pix_x; const float bin_x_min = PixToNonSquareNdc(bx * bin_size, W, H) - half_pix_x; const bool x_overlap = (xmin <= bin_x_max) && (bin_x_min < xmax); if (y_overlap && x_overlap) { binmask.set(by, bx, f); } } } } __syncthreads(); // Now we have processed every face in the current chunk. We need to // count the number of faces in each bin so we can write the indices // out to global memory. We have each thread handle a different bin. for (int byx = threadIdx.x; byx < num_bins_y * num_bins_x; byx += blockDim.x) { const int by = byx / num_bins_x; const int bx = byx % num_bins_x; const int count = binmask.count(by, bx); const int faces_per_bin_idx = batch_idx * num_bins_y * num_bins_x + by * num_bins_x + bx; // This atomically increments the (global) number of faces found // in the current bin, and gets the previous value of the counter; // this effectively allocates space in the bin_faces array for the // faces in the current chunk that fall into this bin. const int start = atomicAdd(faces_per_bin + faces_per_bin_idx, count); // Now loop over the binmask and write the active bits for this bin // out to bin_faces. int next_idx = batch_idx * num_bins_y * num_bins_x * M + by * num_bins_x * M + bx * M + start; for (int f = 0; f < chunk_size; ++f) { if (binmask.get(by, bx, f)) { // TODO(T54296346) find the correct method for handling errors in // CUDA. Throw an error if num_faces_per_bin > max_faces_per_bin. // Either decrease bin size or increase max_faces_per_bin bin_faces[next_idx] = face_start_idx + f; next_idx++; } } } __syncthreads(); } } at::Tensor RasterizeMeshesCoarseCuda( const at::Tensor& face_verts, const at::Tensor& mesh_to_face_first_idx, const at::Tensor& num_faces_per_mesh, const std::tuple<int, int> image_size, const float blur_radius, const int bin_size, const int max_faces_per_bin) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, mesh_to_face_first_idx_t{ mesh_to_face_first_idx, "mesh_to_face_first_idx", 2}, num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3}; at::CheckedFrom c = "RasterizeMeshesCoarseCuda"; at::checkAllSameGPU( c, {face_verts_t, mesh_to_face_first_idx_t, num_faces_per_mesh_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); const int F = face_verts.size(0); const int N = num_faces_per_mesh.size(0); const int M = max_faces_per_bin; // Integer divide round up. 
const int num_bins_y = 1 + (H - 1) / bin_size; const int num_bins_x = 1 + (W - 1) / bin_size; if (num_bins_y >= kMaxItemsPerBin || num_bins_x >= kMaxItemsPerBin) { std::stringstream ss; ss << "In Coarse Rasterizer got num_bins_y: " << num_bins_y << ", num_bins_x: " << num_bins_x << ", " << "; that's too many!"; AT_ERROR(ss.str()); } auto opts = num_faces_per_mesh.options().dtype(at::kInt); at::Tensor faces_per_bin = at::zeros({N, num_bins_y, num_bins_x}, opts); at::Tensor bin_faces = at::full({N, num_bins_y, num_bins_x, M}, -1, opts); if (bin_faces.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return bin_faces; } const int chunk_size = 512; const size_t shared_size = num_bins_y * num_bins_x * chunk_size / 8; const size_t blocks = 64; const size_t threads = 512; hipLaunchKernelGGL(( RasterizeMeshesCoarseCudaKernel), dim3(blocks), dim3(threads), shared_size, stream, face_verts.contiguous().data_ptr<float>(), mesh_to_face_first_idx.contiguous().data_ptr<int64_t>(), num_faces_per_mesh.contiguous().data_ptr<int64_t>(), blur_radius, N, F, H, W, bin_size, chunk_size, M, faces_per_bin.data_ptr<int32_t>(), bin_faces.data_ptr<int32_t>()); AT_CUDA_CHECK(hipGetLastError()); return bin_faces; } // **************************************************************************** // * FINE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesFineCudaKernel( const float* face_verts, // (F, 3, 3) const int32_t* bin_faces, // (N, BH, BW, T) const float blur_radius, const int bin_size, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces, const int N, const int BH, const int BW, const int M, const int H, const int W, const int K, int64_t* face_idxs, // (N, H, W, K) float* zbuf, // (N, H, W, K) float* pix_dists, // (N, H, W, K) float* bary // (N, H, W, K, 3) ) { // This can be more than H * W if H or W are not divisible by bin_size. int num_pixels = N * BH * BW * bin_size * bin_size; int num_threads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int pid = tid; pid < num_pixels; pid += num_threads) { // Convert linear index into bin and pixel indices. We make the within // block pixel ids move the fastest, so that adjacent threads will fall // into the same bin; this should give them coalesced memory reads when // they read from faces and bin_faces. int i = pid; const int n = i / (BH * BW * bin_size * bin_size); i %= BH * BW * bin_size * bin_size; // bin index y const int by = i / (BW * bin_size * bin_size); i %= BW * bin_size * bin_size; // bin index y const int bx = i / (bin_size * bin_size); // pixel within the bin i %= bin_size * bin_size; // Pixel x, y indices const int yi = i / bin_size + by * bin_size; const int xi = i % bin_size + bx * bin_size; if (yi >= H || xi >= W) continue; const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); const float2 pxy = make_float2(xf, yf); // This part looks like the naive rasterization kernel, except we use // bin_faces to only look at a subset of faces already known to fall // in this bin. TODO abstract out this logic into some data structure // that is shared by both kernels? Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; for (int m = 0; m < M; m++) { const int f = bin_faces[n * BH * BW * M + by * BW * M + bx * M + m]; if (f < 0) { continue; // bin_faces uses -1 as a sentinal value. 
} // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. CheckPixelInsideFace( face_verts, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct, clip_barycentric_coords, cull_backfaces); } // Now we've looked at all the faces for this bin, so we can write // output for the current pixel. // TODO: make sorting an option as only top k is needed, not sorted values. BubbleSort(q, q_size); // Reverse ordering of the X and Y axis so that // in the image +Y is pointing up and +X is pointing left. const int yidx = H - 1 - yi; const int xidx = W - 1 - xi; const int pix_idx = n * H * W * K + yidx * W * K + xidx * K; for (int k = 0; k < q_size; k++) { face_idxs[pix_idx + k] = q[k].idx; zbuf[pix_idx + k] = q[k].z; pix_dists[pix_idx + k] = q[k].dist; bary[(pix_idx + k) * 3 + 0] = q[k].bary.x; bary[(pix_idx + k) * 3 + 1] = q[k].bary.y; bary[(pix_idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> RasterizeMeshesFineCuda( const at::Tensor& face_verts, const at::Tensor& bin_faces, const std::tuple<int, int> image_size, const float blur_radius, const int bin_size, const int faces_per_pixel, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); TORCH_CHECK(bin_faces.ndimension() == 4, "bin_faces must have 4 dimensions"); // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, bin_faces_t{bin_faces, "bin_faces", 2}; at::CheckedFrom c = "RasterizeMeshesFineCuda"; at::checkAllSameGPU(c, {face_verts_t, bin_faces_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // bin_faces shape (N, BH, BW, M) const int N = bin_faces.size(0); const int BH = bin_faces.size(1); const int BW = bin_faces.size(2); const int M = bin_faces.size(3); const int K = faces_per_pixel; const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); if (K > kMaxPointsPerPixel) { AT_ERROR("Must have num_closest <= 150"); } auto long_opts = bin_faces.options().dtype(at::kLong); auto float_opts = face_verts.options().dtype(at::kFloat); at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts); if (face_idxs.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizeMeshesFineCudaKernel), dim3(blocks), dim3(threads), 0, stream, face_verts.contiguous().data_ptr<float>(), bin_faces.contiguous().data_ptr<int32_t>(), blur_radius, bin_size, perspective_correct, clip_barycentric_coords, cull_backfaces, N, BH, BW, M, H, W, K, face_idxs.data_ptr<int64_t>(), zbuf.data_ptr<float>(), pix_dists.data_ptr<float>(), bary.data_ptr<float>()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); }
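// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): how the two host entry
// points defined above compose into the binned rasterization path. Pass 1
// (RasterizeMeshesCoarseCuda) buckets faces into coarse screen-space bins;
// pass 2 (RasterizeMeshesFineCuda) rasterizes each pixel against only the
// faces listed for its bin. The wrapper name and the concrete bin_size /
// max_faces_per_bin values are assumptions chosen for illustration; the
// signatures and return shapes are taken from the definitions above, and the
// includes/declarations of the surrounding file are assumed to be visible.
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> RasterizeMeshesBinnedSketch(
    const at::Tensor& face_verts,             // (F, 3, 3) packed face vertices, float32, on GPU
    const at::Tensor& mesh_to_face_first_idx, // (N,) first face index of each mesh, int64
    const at::Tensor& num_faces_per_mesh,     // (N,) face count of each mesh, int64
    const std::tuple<int, int> image_size,    // (H, W)
    const float blur_radius,
    const int faces_per_pixel) {
  const int bin_size = 32;           // assumed value; bins per axis must stay below kMaxItemsPerBin
  const int max_faces_per_bin = 100; // assumed value; too small a value overflows bins (see TODO above)

  // Pass 1: (N, num_bins_y, num_bins_x, max_faces_per_bin) tensor of face indices, padded with -1.
  at::Tensor bin_faces = RasterizeMeshesCoarseCuda(
      face_verts, mesh_to_face_first_idx, num_faces_per_mesh,
      image_size, blur_radius, bin_size, max_faces_per_bin);

  // Pass 2: per-pixel K-closest face indices, z-buffer, barycentrics and signed distances.
  return RasterizeMeshesFineCuda(
      face_verts, bin_faces, image_size, blur_radius, bin_size, faces_per_pixel,
      /*perspective_correct=*/false, /*clip_barycentric_coords=*/false, /*cull_backfaces=*/false);
}
// ----------------------------------------------------------------------------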
b0912d741e198a9002155c971d137402bfa7775e.cu
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <float.h> #include <math.h> #include <thrust/tuple.h> #include <cstdio> #include <tuple> #include "rasterize_points/bitmask.cuh" #include "rasterize_points/rasterization_utils.cuh" #include "utils/float_math.cuh" #include "utils/geometry_utils.cuh" namespace { // A structure for holding details about a pixel. struct Pixel { float z; int64_t idx; float dist; float3 bary; }; __device__ bool operator<(const Pixel& a, const Pixel& b) { return a.z < b.z; } __device__ float FloatMin3(const float p1, const float p2, const float p3) { return fminf(p1, fminf(p2, p3)); } __device__ float FloatMax3(const float p1, const float p2, const float p3) { return fmaxf(p1, fmaxf(p2, p3)); } // Get the xyz coordinates of the three vertices for the face given by the // index face_idx into face_verts. __device__ thrust::tuple<float3, float3, float3> GetSingleFaceVerts( const float* face_verts, int face_idx) { const float x0 = face_verts[face_idx * 9 + 0]; const float y0 = face_verts[face_idx * 9 + 1]; const float z0 = face_verts[face_idx * 9 + 2]; const float x1 = face_verts[face_idx * 9 + 3]; const float y1 = face_verts[face_idx * 9 + 4]; const float z1 = face_verts[face_idx * 9 + 5]; const float x2 = face_verts[face_idx * 9 + 6]; const float y2 = face_verts[face_idx * 9 + 7]; const float z2 = face_verts[face_idx * 9 + 8]; const float3 v0xyz = make_float3(x0, y0, z0); const float3 v1xyz = make_float3(x1, y1, z1); const float3 v2xyz = make_float3(x2, y2, z2); return thrust::make_tuple(v0xyz, v1xyz, v2xyz); } // Get the min/max x/y/z values for the face given by vertices v0, v1, v2. __device__ thrust::tuple<float2, float2, float2> GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) { const float xmin = FloatMin3(v0.x, v1.x, v2.x); const float ymin = FloatMin3(v0.y, v1.y, v2.y); const float zmin = FloatMin3(v0.z, v1.z, v2.z); const float xmax = FloatMax3(v0.x, v1.x, v2.x); const float ymax = FloatMax3(v0.y, v1.y, v2.y); const float zmax = FloatMax3(v0.z, v1.z, v2.z); return thrust::make_tuple( make_float2(xmin, xmax), make_float2(ymin, ymax), make_float2(zmin, zmax)); } // Check if the point (px, py) lies outside the face bounding box face_bbox. // Return true if the point is outside. __device__ bool CheckPointOutsideBoundingBox( float3 v0, float3 v1, float3 v2, float blur_radius, float2 pxy) { const auto bbox = GetFaceBoundingBox(v0, v1, v2); const float2 xlims = thrust::get<0>(bbox); const float2 ylims = thrust::get<1>(bbox); const float2 zlims = thrust::get<2>(bbox); const float x_min = xlims.x - blur_radius; const float y_min = ylims.x - blur_radius; const float x_max = xlims.y + blur_radius; const float y_max = ylims.y + blur_radius; // Faces with at least one vertex behind the camera won't render correctly // and should be removed or clipped before calling the rasterizer const bool z_invalid = zlims.x < kEpsilon; // Check if the current point is oustside the triangle bounding box. return ( pxy.x > x_max || pxy.x < x_min || pxy.y > y_max || pxy.y < y_min || z_invalid); } // This function checks if a pixel given by xy location pxy lies within the // face with index face_idx in face_verts. One of the inputs is a list (q) // which contains Pixel structs with the indices of the faces which intersect // with this pixel sorted by closest z distance. If the point pxy lies in the // face, the list (q) is updated and re-orderered in place. 
In addition // the auxillary variables q_size, q_max_z and q_max_idx are also modified. // This code is shared between RasterizeMeshesNaiveCudaKernel and // RasterizeMeshesFineCudaKernel. template <typename FaceQ> __device__ void CheckPixelInsideFace( const float* face_verts, // (F, 3, 3) const int face_idx, int& q_size, float& q_max_z, int& q_max_idx, FaceQ& q, const float blur_radius, const float2 pxy, // Coordinates of the pixel const int K, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { const auto v012 = GetSingleFaceVerts(face_verts, face_idx); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only need xy for barycentric coordinates and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Perform checks and skip if: // 1. the face is behind the camera // 2. the face is facing away from the camera // 3. the face has very small face area // 4. the pixel is outside the face bbox const float zmax = FloatMax3(v0.z, v1.z, v2.z); const bool outside_bbox = CheckPointOutsideBoundingBox( v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy); // Check if the face is visible to the camera. const bool back_face = face_area < 0.0; const bool zero_face_area = face_area <= kEpsilon && face_area >= -1.0f * kEpsilon; if (zmax < 0 || cull_backfaces && back_face || outside_bbox || zero_face_area) return; // Calculate barycentric coords and euclidean dist to triangle. const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 p_bary = !perspective_correct ? p_bary0 : BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z); const float3 p_bary_clip = !clip_barycentric_coords ? p_bary : BarycentricClipForward(p_bary); const float pz = p_bary_clip.x * v0.z + p_bary_clip.y * v1.z + p_bary_clip.z * v2.z; if (pz < 0) return; // Face is behind the image plane. // Get abs squared distance const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy); // Use the unclipped bary coordinates to determine if the point is inside the // face. const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f; const float signed_dist = inside ? -dist : dist; // Check if pixel is outside blur region if (!inside && dist >= blur_radius) { return; } if (q_size < K) { // Just insert it. q[q_size] = {pz, face_idx, signed_dist, p_bary_clip}; if (pz > q_max_z) { q_max_z = pz; q_max_idx = q_size; } q_size++; } else if (pz < q_max_z) { // Overwrite the old max, and find the new max. 
q[q_max_idx] = {pz, face_idx, signed_dist, p_bary_clip}; q_max_z = pz; for (int i = 0; i < K; i++) { if (q[i].z > q_max_z) { q_max_z = q[i].z; q_max_idx = i; } } } } } // namespace // **************************************************************************** // * NAIVE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesNaiveCudaKernel( const float* face_verts, const int64_t* mesh_to_face_first_idx, const int64_t* num_faces_per_mesh, const float blur_radius, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces, const int N, const int H, const int W, const int K, int64_t* face_idxs, float* zbuf, float* pix_dists, float* bary) { // Simple version: One thread per output pixel int num_threads = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i = tid; i < N * H * W; i += num_threads) { // Convert linear index to 3D index const int n = i / (H * W); // batch index. const int pix_idx = i % (H * W); // Reverse ordering of X and Y axes const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; // screen coordinates to ndc coordiantes of pixel. const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); const float2 pxy = make_float2(xf, yf); // For keeping track of the K closest points we want a data structure // that (1) gives O(1) access to the closest point for easy comparisons, // and (2) allows insertion of new elements. In the CPU version we use // std::priority_queue; then (2) is O(log K). We can't use STL // containers in CUDA; we could roll our own max heap in an array, but // that would likely have a lot of warp divergence so we do something // simpler instead: keep the elements in an unsorted array, but keep // track of the max value and the index of the max value. Then (1) is // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8 // this should be fast enough for our purposes. Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; // Using the batch index of the thread get the start and stop // indices for the faces. const int64_t face_start_idx = mesh_to_face_first_idx[n]; const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n]; // Loop through the faces in the mesh. for (int f = face_start_idx; f < face_stop_idx; ++f) { // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. CheckPixelInsideFace( face_verts, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct, clip_barycentric_coords, cull_backfaces); } // TODO: make sorting an option as only top k is needed, not sorted values. 
BubbleSort(q, q_size); int idx = n * H * W * K + pix_idx * K; for (int k = 0; k < q_size; ++k) { face_idxs[idx + k] = q[k].idx; zbuf[idx + k] = q[k].z; pix_dists[idx + k] = q[k].dist; bary[(idx + k) * 3 + 0] = q[k].bary.x; bary[(idx + k) * 3 + 1] = q[k].bary.y; bary[(idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> RasterizeMeshesNaiveCuda( const at::Tensor& face_verts, const at::Tensor& mesh_to_faces_packed_first_idx, const at::Tensor& num_faces_per_mesh, const std::tuple<int, int> image_size, const float blur_radius, const int num_closest, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); TORCH_CHECK( num_faces_per_mesh.size(0) == mesh_to_faces_packed_first_idx.size(0), "num_faces_per_mesh must have save size first dimension as mesh_to_faces_packed_first_idx"); if (num_closest > kMaxPointsPerPixel) { std::stringstream ss; ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel; AT_ERROR(ss.str()); } // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, mesh_to_faces_packed_first_idx_t{ mesh_to_faces_packed_first_idx, "mesh_to_faces_packed_first_idx", 2}, num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3}; at::CheckedFrom c = "RasterizeMeshesNaiveCuda"; at::checkAllSameGPU( c, {face_verts_t, mesh_to_faces_packed_first_idx_t, num_faces_per_mesh_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(face_verts.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const int N = num_faces_per_mesh.size(0); // batch size. const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); const int K = num_closest; auto long_opts = num_faces_per_mesh.options().dtype(at::kLong); auto float_opts = face_verts.options().dtype(at::kFloat); at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts); if (face_idxs.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; RasterizeMeshesNaiveCudaKernel<<<blocks, threads, 0, stream>>>( face_verts.contiguous().data_ptr<float>(), mesh_to_faces_packed_first_idx.contiguous().data_ptr<int64_t>(), num_faces_per_mesh.contiguous().data_ptr<int64_t>(), blur_radius, perspective_correct, clip_barycentric_coords, cull_backfaces, N, H, W, K, face_idxs.data_ptr<int64_t>(), zbuf.data_ptr<float>(), pix_dists.data_ptr<float>(), bary.data_ptr<float>()); AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } // **************************************************************************** // * BACKWARD PASS * // **************************************************************************** // TODO: benchmark parallelizing over faces_verts instead of over pixels. 
__global__ void RasterizeMeshesBackwardCudaKernel( const float* face_verts, // (F, 3, 3) const int64_t* pix_to_face, // (N, H, W, K) const bool perspective_correct, const bool clip_barycentric_coords, const int N, const int H, const int W, const int K, const float* grad_zbuf, // (N, H, W, K) const float* grad_bary, // (N, H, W, K, 3) const float* grad_dists, // (N, H, W, K) float* grad_face_verts) { // (F, 3, 3) // Parallelize over each pixel in images of // size H * W, for each image in the batch of size N. const int num_threads = gridDim.x * blockDim.x; const int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int t_i = tid; t_i < N * H * W; t_i += num_threads) { // Convert linear index to 3D index const int n = t_i / (H * W); // batch index. const int pix_idx = t_i % (H * W); // Reverse ordering of X and Y axes. const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); const float2 pxy = make_float2(xf, yf); // Loop over all the faces for this pixel. for (int k = 0; k < K; k++) { // Index into (N, H, W, K, :) grad tensors // pixel index + top k index int i = n * H * W * K + pix_idx * K + k; const int f = pix_to_face[i]; if (f < 0) { continue; // padded face. } // Get xyz coordinates of the three face vertices. const auto v012 = GetSingleFaceVerts(face_verts, f); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only neex xy for barycentric coordinate and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Get upstream gradients for the face. const float grad_dist_upstream = grad_dists[i]; const float grad_zbuf_upstream = grad_zbuf[i]; const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0]; const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1]; const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2]; const float3 grad_bary_upstream = make_float3( grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2); const float3 b_w = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 b_pp = !perspective_correct ? b_w : BarycentricPerspectiveCorrectionForward(b_w, v0.z, v1.z, v2.z); const float3 b_w_clip = !clip_barycentric_coords ? b_pp : BarycentricClipForward(b_pp); const bool inside = b_pp.x > 0.0f && b_pp.y > 0.0f && b_pp.z > 0.0f; const float sign = inside ? -1.0f : 1.0f; auto grad_dist_f = PointTriangleDistanceBackward( pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream); const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f); const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f); const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f); // Upstream gradient for barycentric coords from zbuf calculation: // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2 // Therefore // d_zbuf/d_bary_w0 = z0 // d_zbuf/d_bary_w1 = z1 // d_zbuf/d_bary_w2 = z2 const float3 d_zbuf_d_bwclip = make_float3(v0.z, v1.z, v2.z); // Total upstream barycentric gradients are the sum of // external upstream gradients and contribution from zbuf. 
const float3 grad_bary_f_sum = (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bwclip); float3 grad_bary0 = grad_bary_f_sum; if (clip_barycentric_coords) { grad_bary0 = BarycentricClipBackward(b_w, grad_bary_f_sum); } float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f; if (perspective_correct) { auto perspective_grads = BarycentricPerspectiveCorrectionBackward( b_w, v0.z, v1.z, v2.z, grad_bary0); grad_bary0 = thrust::get<0>(perspective_grads); dz0_persp = thrust::get<1>(perspective_grads); dz1_persp = thrust::get<2>(perspective_grads); dz2_persp = thrust::get<3>(perspective_grads); } auto grad_bary_f = BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0); const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f); const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f); const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f); atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x); atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y); atomicAdd( grad_face_verts + f * 9 + 2, grad_zbuf_upstream * b_w_clip.x + dz0_persp); atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x); atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y); atomicAdd( grad_face_verts + f * 9 + 5, grad_zbuf_upstream * b_w_clip.y + dz1_persp); atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x); atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y); atomicAdd( grad_face_verts + f * 9 + 8, grad_zbuf_upstream * b_w_clip.z + dz2_persp); } } } at::Tensor RasterizeMeshesBackwardCuda( const at::Tensor& face_verts, // (F, 3, 3) const at::Tensor& pix_to_face, // (N, H, W, K) const at::Tensor& grad_zbuf, // (N, H, W, K) const at::Tensor& grad_bary, // (N, H, W, K, 3) const at::Tensor& grad_dists, // (N, H, W, K) const bool perspective_correct, const bool clip_barycentric_coords) { // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, pix_to_face_t{pix_to_face, "pix_to_face", 2}, grad_zbuf_t{grad_zbuf, "grad_zbuf", 3}, grad_bary_t{grad_bary, "grad_bary", 4}, grad_dists_t{grad_dists, "grad_dists", 5}; at::CheckedFrom c = "RasterizeMeshesBackwardCuda"; at::checkAllSameGPU( c, {face_verts_t, pix_to_face_t, grad_zbuf_t, grad_bary_t, grad_dists_t}); at::checkAllSameType( c, {face_verts_t, grad_zbuf_t, grad_bary_t, grad_dists_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(face_verts.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const int F = face_verts.size(0); const int N = pix_to_face.size(0); const int H = pix_to_face.size(1); const int W = pix_to_face.size(2); const int K = pix_to_face.size(3); at::Tensor grad_face_verts = at::zeros({F, 3, 3}, face_verts.options()); if (grad_face_verts.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return grad_face_verts; } const size_t blocks = 1024; const size_t threads = 64; RasterizeMeshesBackwardCudaKernel<<<blocks, threads, 0, stream>>>( face_verts.contiguous().data_ptr<float>(), pix_to_face.contiguous().data_ptr<int64_t>(), perspective_correct, clip_barycentric_coords, N, H, W, K, grad_zbuf.contiguous().data_ptr<float>(), grad_bary.contiguous().data_ptr<float>(), grad_dists.contiguous().data_ptr<float>(), grad_face_verts.data_ptr<float>()); AT_CUDA_CHECK(cudaGetLastError()); return grad_face_verts; } // **************************************************************************** // * COARSE RASTERIZATION * // 
**************************************************************************** __global__ void RasterizeMeshesCoarseCudaKernel( const float* face_verts, const int64_t* mesh_to_face_first_idx, const int64_t* num_faces_per_mesh, const float blur_radius, const int N, const int F, const int H, const int W, const int bin_size, const int chunk_size, const int max_faces_per_bin, int* faces_per_bin, int* bin_faces) { extern __shared__ char sbuf[]; const int M = max_faces_per_bin; // Integer divide round up const int num_bins_x = 1 + (W - 1) / bin_size; const int num_bins_y = 1 + (H - 1) / bin_size; // NDC range depends on the ratio of W/H // The shorter side from (H, W) is given an NDC range of 2.0 and // the other side is scaled by the ratio of H:W. const float NDC_x_half_range = NonSquareNdcRange(W, H) / 2.0f; const float NDC_y_half_range = NonSquareNdcRange(H, W) / 2.0f; // Size of half a pixel in NDC units is the NDC half range // divided by the corresponding image dimension const float half_pix_x = NDC_x_half_range / W; const float half_pix_y = NDC_y_half_range / H; // This is a boolean array of shape (num_bins_y, num_bins_x, chunk_size) // stored in shared memory that will track whether each point in the chunk // falls into each bin of the image. BitMask binmask((unsigned int*)sbuf, num_bins_y, num_bins_x, chunk_size); // Have each block handle a chunk of faces const int chunks_per_batch = 1 + (F - 1) / chunk_size; const int num_chunks = N * chunks_per_batch; for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) { const int batch_idx = chunk / chunks_per_batch; // batch index const int chunk_idx = chunk % chunks_per_batch; const int face_start_idx = chunk_idx * chunk_size; binmask.block_clear(); const int64_t mesh_face_start_idx = mesh_to_face_first_idx[batch_idx]; const int64_t mesh_face_stop_idx = mesh_face_start_idx + num_faces_per_mesh[batch_idx]; // Have each thread handle a different face within the chunk for (int f = threadIdx.x; f < chunk_size; f += blockDim.x) { const int f_idx = face_start_idx + f; // Check if face index corresponds to the mesh in the batch given by // batch_idx if (f_idx >= mesh_face_stop_idx || f_idx < mesh_face_start_idx) { continue; } // Get xyz coordinates of the three face vertices. const auto v012 = GetSingleFaceVerts(face_verts, f_idx); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Compute screen-space bbox for the triangle expanded by blur. float xmin = FloatMin3(v0.x, v1.x, v2.x) - sqrt(blur_radius); float ymin = FloatMin3(v0.y, v1.y, v2.y) - sqrt(blur_radius); float xmax = FloatMax3(v0.x, v1.x, v2.x) + sqrt(blur_radius); float ymax = FloatMax3(v0.y, v1.y, v2.y) + sqrt(blur_radius); float zmin = FloatMin3(v0.z, v1.z, v2.z); // Faces with at least one vertex behind the camera won't render // correctly and should be removed or clipped before calling the // rasterizer if (zmin < kEpsilon) { continue; } // Brute-force search over all bins; TODO(T54294966) something smarter. for (int by = 0; by < num_bins_y; ++by) { // Y coordinate of the top and bottom of the bin. // PixToNdc gives the location of the center of each pixel, so we // need to add/subtract a half pixel to get the true extent of the bin. // Reverse ordering of Y axis so that +Y is upwards in the image. 
const float bin_y_min = PixToNonSquareNdc(by * bin_size, H, W) - half_pix_y; const float bin_y_max = PixToNonSquareNdc((by + 1) * bin_size - 1, H, W) + half_pix_y; const bool y_overlap = (ymin <= bin_y_max) && (bin_y_min < ymax); for (int bx = 0; bx < num_bins_x; ++bx) { // X coordinate of the left and right of the bin. // Reverse ordering of x axis so that +X is left. const float bin_x_max = PixToNonSquareNdc((bx + 1) * bin_size - 1, W, H) + half_pix_x; const float bin_x_min = PixToNonSquareNdc(bx * bin_size, W, H) - half_pix_x; const bool x_overlap = (xmin <= bin_x_max) && (bin_x_min < xmax); if (y_overlap && x_overlap) { binmask.set(by, bx, f); } } } } __syncthreads(); // Now we have processed every face in the current chunk. We need to // count the number of faces in each bin so we can write the indices // out to global memory. We have each thread handle a different bin. for (int byx = threadIdx.x; byx < num_bins_y * num_bins_x; byx += blockDim.x) { const int by = byx / num_bins_x; const int bx = byx % num_bins_x; const int count = binmask.count(by, bx); const int faces_per_bin_idx = batch_idx * num_bins_y * num_bins_x + by * num_bins_x + bx; // This atomically increments the (global) number of faces found // in the current bin, and gets the previous value of the counter; // this effectively allocates space in the bin_faces array for the // faces in the current chunk that fall into this bin. const int start = atomicAdd(faces_per_bin + faces_per_bin_idx, count); // Now loop over the binmask and write the active bits for this bin // out to bin_faces. int next_idx = batch_idx * num_bins_y * num_bins_x * M + by * num_bins_x * M + bx * M + start; for (int f = 0; f < chunk_size; ++f) { if (binmask.get(by, bx, f)) { // TODO(T54296346) find the correct method for handling errors in // CUDA. Throw an error if num_faces_per_bin > max_faces_per_bin. // Either decrease bin size or increase max_faces_per_bin bin_faces[next_idx] = face_start_idx + f; next_idx++; } } } __syncthreads(); } } at::Tensor RasterizeMeshesCoarseCuda( const at::Tensor& face_verts, const at::Tensor& mesh_to_face_first_idx, const at::Tensor& num_faces_per_mesh, const std::tuple<int, int> image_size, const float blur_radius, const int bin_size, const int max_faces_per_bin) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, mesh_to_face_first_idx_t{ mesh_to_face_first_idx, "mesh_to_face_first_idx", 2}, num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3}; at::CheckedFrom c = "RasterizeMeshesCoarseCuda"; at::checkAllSameGPU( c, {face_verts_t, mesh_to_face_first_idx_t, num_faces_per_mesh_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(face_verts.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); const int F = face_verts.size(0); const int N = num_faces_per_mesh.size(0); const int M = max_faces_per_bin; // Integer divide round up. 
const int num_bins_y = 1 + (H - 1) / bin_size; const int num_bins_x = 1 + (W - 1) / bin_size; if (num_bins_y >= kMaxItemsPerBin || num_bins_x >= kMaxItemsPerBin) { std::stringstream ss; ss << "In Coarse Rasterizer got num_bins_y: " << num_bins_y << ", num_bins_x: " << num_bins_x << ", " << "; that's too many!"; AT_ERROR(ss.str()); } auto opts = num_faces_per_mesh.options().dtype(at::kInt); at::Tensor faces_per_bin = at::zeros({N, num_bins_y, num_bins_x}, opts); at::Tensor bin_faces = at::full({N, num_bins_y, num_bins_x, M}, -1, opts); if (bin_faces.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return bin_faces; } const int chunk_size = 512; const size_t shared_size = num_bins_y * num_bins_x * chunk_size / 8; const size_t blocks = 64; const size_t threads = 512; RasterizeMeshesCoarseCudaKernel<<<blocks, threads, shared_size, stream>>>( face_verts.contiguous().data_ptr<float>(), mesh_to_face_first_idx.contiguous().data_ptr<int64_t>(), num_faces_per_mesh.contiguous().data_ptr<int64_t>(), blur_radius, N, F, H, W, bin_size, chunk_size, M, faces_per_bin.data_ptr<int32_t>(), bin_faces.data_ptr<int32_t>()); AT_CUDA_CHECK(cudaGetLastError()); return bin_faces; } // **************************************************************************** // * FINE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesFineCudaKernel( const float* face_verts, // (F, 3, 3) const int32_t* bin_faces, // (N, BH, BW, T) const float blur_radius, const int bin_size, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces, const int N, const int BH, const int BW, const int M, const int H, const int W, const int K, int64_t* face_idxs, // (N, H, W, K) float* zbuf, // (N, H, W, K) float* pix_dists, // (N, H, W, K) float* bary // (N, H, W, K, 3) ) { // This can be more than H * W if H or W are not divisible by bin_size. int num_pixels = N * BH * BW * bin_size * bin_size; int num_threads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int pid = tid; pid < num_pixels; pid += num_threads) { // Convert linear index into bin and pixel indices. We make the within // block pixel ids move the fastest, so that adjacent threads will fall // into the same bin; this should give them coalesced memory reads when // they read from faces and bin_faces. int i = pid; const int n = i / (BH * BW * bin_size * bin_size); i %= BH * BW * bin_size * bin_size; // bin index y const int by = i / (BW * bin_size * bin_size); i %= BW * bin_size * bin_size; // bin index y const int bx = i / (bin_size * bin_size); // pixel within the bin i %= bin_size * bin_size; // Pixel x, y indices const int yi = i / bin_size + by * bin_size; const int xi = i % bin_size + bx * bin_size; if (yi >= H || xi >= W) continue; const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); const float2 pxy = make_float2(xf, yf); // This part looks like the naive rasterization kernel, except we use // bin_faces to only look at a subset of faces already known to fall // in this bin. TODO abstract out this logic into some data structure // that is shared by both kernels? Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; for (int m = 0; m < M; m++) { const int f = bin_faces[n * BH * BW * M + by * BW * M + bx * M + m]; if (f < 0) { continue; // bin_faces uses -1 as a sentinal value. 
} // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. CheckPixelInsideFace( face_verts, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct, clip_barycentric_coords, cull_backfaces); } // Now we've looked at all the faces for this bin, so we can write // output for the current pixel. // TODO: make sorting an option as only top k is needed, not sorted values. BubbleSort(q, q_size); // Reverse ordering of the X and Y axis so that // in the image +Y is pointing up and +X is pointing left. const int yidx = H - 1 - yi; const int xidx = W - 1 - xi; const int pix_idx = n * H * W * K + yidx * W * K + xidx * K; for (int k = 0; k < q_size; k++) { face_idxs[pix_idx + k] = q[k].idx; zbuf[pix_idx + k] = q[k].z; pix_dists[pix_idx + k] = q[k].dist; bary[(pix_idx + k) * 3 + 0] = q[k].bary.x; bary[(pix_idx + k) * 3 + 1] = q[k].bary.y; bary[(pix_idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> RasterizeMeshesFineCuda( const at::Tensor& face_verts, const at::Tensor& bin_faces, const std::tuple<int, int> image_size, const float blur_radius, const int bin_size, const int faces_per_pixel, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); TORCH_CHECK(bin_faces.ndimension() == 4, "bin_faces must have 4 dimensions"); // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, bin_faces_t{bin_faces, "bin_faces", 2}; at::CheckedFrom c = "RasterizeMeshesFineCuda"; at::checkAllSameGPU(c, {face_verts_t, bin_faces_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(face_verts.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // bin_faces shape (N, BH, BW, M) const int N = bin_faces.size(0); const int BH = bin_faces.size(1); const int BW = bin_faces.size(2); const int M = bin_faces.size(3); const int K = faces_per_pixel; const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); if (K > kMaxPointsPerPixel) { AT_ERROR("Must have num_closest <= 150"); } auto long_opts = bin_faces.options().dtype(at::kLong); auto float_opts = face_verts.options().dtype(at::kFloat); at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts); if (face_idxs.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; RasterizeMeshesFineCudaKernel<<<blocks, threads, 0, stream>>>( face_verts.contiguous().data_ptr<float>(), bin_faces.contiguous().data_ptr<int32_t>(), blur_radius, bin_size, perspective_correct, clip_barycentric_coords, cull_backfaces, N, BH, BW, M, H, W, K, face_idxs.data_ptr<int64_t>(), zbuf.data_ptr<float>(), pix_dists.data_ptr<float>(), bary.data_ptr<float>()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); }
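// ----------------------------------------------------------------------------
// Distilled sketch (illustrative, not part of the original file) of the
// K-closest bookkeeping that CheckPixelInsideFace performs on q, q_size,
// q_max_z and q_max_idx above: keep an unsorted array of at most K entries
// plus the value and index of the current farthest entry, so the "should this
// face be kept?" check is O(1) and an insertion is O(K). The Entry type and
// the helper name are hypothetical stand-ins for the Pixel struct and the
// inline logic in the kernels.
struct Entry {
  float z;  // depth of the candidate
  int idx;  // face index of the candidate
};

template <int K>
__device__ void InsertKClosest(
    Entry (&q)[K], int& q_size, float& q_max_z, int& q_max_idx, float z, int idx) {
  if (q_size < K) {
    // Still room: append and keep the running max up to date.
    q[q_size] = {z, idx};
    if (z > q_max_z) {
      q_max_z = z;
      q_max_idx = q_size;
    }
    q_size++;
  } else if (z < q_max_z) {
    // Full: the new, closer element replaces the current farthest one,
    // then a linear rescan recovers the new farthest element.
    q[q_max_idx] = {z, idx};
    q_max_z = z;
    for (int i = 0; i < K; ++i) {
      if (q[i].z > q_max_z) {
        q_max_z = q[i].z;
        q_max_idx = i;
      }
    }
  }
}
// Callers would initialize q_size = 0, q_max_z to a very small value and
// q_max_idx = -1, exactly as the rasterization kernels above do before the
// per-face loop.
// ----------------------------------------------------------------------------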
a6369787d7c3778411fd903a4f182fc1065eee02.hip
// !!! This is a file automatically generated by hipify!!! #include "basic/GPUBasic.h" #include "model/GPUDevice.h" #include "math/GPUMath.h" #include "math/MatrixMultiply.h" namespace Deep8 { namespace Math { template <typename T> void MatrixMultiplyGPUImpl(GPUDevice* device, const T* x, const Shape& xshape, const T* y, const Shape& yshape, T* z, const Shape& zshape) { DEEP8_RUNTIME_ERROR("the type in not support"); } template <> void MatrixMultiplyGPUImpl<float>( GPUDevice* device, const float* x, const Shape& xshape, const float* y, const Shape& yshape, float* z, const Shape& zshape) { float alpha = 1; float beta = 0; if (1 == yshape.batch) { int m = xshape.batch * xshape.row(); int n = yshape.col(); int k = xshape.col(); CUBLAS_CHECK(hipblasSgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, y, n, x, k, &beta, z, n)); } else if (1 == xshape.batch && 1 == yshape.col()) { int row = xshape.row(); int col = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(hipblasSgemm(device->cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, row, b, col, &alpha, x, col, y, col, &beta, z, row)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto xptr = x + (b % xshape.batch) * xshape.batchSize(); auto yptr = y + (b % yshape.batch) * yshape.batchSize(); auto zptr = z + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(hipblasSgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, yptr, n, xptr, k, &beta, zptr, n)); } } } template <> void MatrixMultiplyGPUImpl<double>(GPUDevice* device, const double* x, const Shape& xshape, const double* y, const Shape& yshape, double* z, const Shape& zshape) { double alpha = 1; double beta = 0; if (1 == yshape.batch) { int m = xshape.batch * xshape.row(); int n = yshape.col(); int k = xshape.col(); CUBLAS_CHECK(hipblasDgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, y, n, x, k, &beta, z, n)); } else if (1 == xshape.batch && 1 == yshape.col()) { int row = xshape.row(); int col = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(hipblasDgemm(device->cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, row, b, col, &alpha, x, col, y, col, &beta, z, row)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto xptr = x + (b % xshape.batch) * xshape.batchSize(); auto yptr = y + (b % yshape.batch) * yshape.batchSize(); auto zptr = z + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(hipblasDgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, yptr, n, xptr, k, &beta, zptr, n)); } } } #ifdef HAVE_HALF template <> void MatrixMultiplyGPUImpl<half>(GPUDevice* device, const half* x, const Shape& xshape, const half* y, const Shape& yshape, half* z, const Shape& zshape) { half alpha = 1.0; half beta = 0.0; if (1 == yshape.batch) { int m = xshape.batch * xshape.row(); int n = yshape.col(); int k = xshape.col(); CUBLAS_CHECK(hipblasHgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, y, n, x, k, &beta, z, n)); } else if (1 == xshape.batch && 1 == yshape.col()) { int row = xshape.row(); int col = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(hipblasHgemm(device->cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, row, b, col, &alpha, x, col, y, col, &beta, z, row)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto xptr = x + (b % xshape.batch) * 
xshape.batchSize(); auto yptr = y + (b % yshape.batch) * yshape.batchSize(); auto zptr = z + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(hipblasHgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, yptr, n, xptr, k, &beta, zptr, n)); } } } #endif void MatrixMultiplyGPU(const Tensor &x, const Tensor &y, Tensor &z) { auto device = (GPUDevice*)x.device(); switch (x.elementType.id) { case DType::Float32: MatrixMultiplyGPUImpl<float>(device, x.data<float>(), x.shape, y.data<float>(), y.shape, z.data<float>(), z.shape); break; case DType::Float64: MatrixMultiplyGPUImpl<double>(device, x.data<double>(), x.shape, y.data<double>(), y.shape, z.data<double>(), z.shape); break; #ifdef HAVE_HALF case DType::Float16: MatrixMultiplyGPUImpl<half>(device, x.data<half>(), x.shape, y.data<half>(), y.shape, z.data<half>(), z.shape); break; #endif default: DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support"); break; } } /**gradient for X*/ template <typename T> void MatrixMultiplyGradXGPUImpl(GPUDevice *device, const T *x, T *dx, const Shape &xshape, const T *y, const Shape &yshape, const T *z, const T *dz, const Shape &zshape) { DEEP8_RUNTIME_ERROR("the type in not support"); } template <> void MatrixMultiplyGradXGPUImpl<float>( GPUDevice *device, const float *x, float *dx, const Shape &xshape, const float *y, const Shape &yshape, const float *z, const float *dz, const Shape &zshape) { float alpha = 1; float beta = 1; if (1 == yshape.batch) { int b = xshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); CUBLAS_CHECK(hipblasSgemm(device->cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, k, b * m, n, &alpha, y, n, dz, n, &beta, dx, k)); } else if (1 == xshape.batch && 1 == yshape.col()) { int m = xshape.row(); int k = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(hipblasSgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, k, m, b, &alpha, y, k, dz, m, &beta, dx, k)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto dxptr = dx + (b % xshape.batch) * xshape.batchSize(); auto yptr = y + (b % yshape.batch) * yshape.batchSize(); auto dzptr = dz + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(hipblasSgemm(device->cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, k, m, n, &alpha, yptr, n, dzptr, n, &beta, dxptr, k)); } } } template <> void MatrixMultiplyGradXGPUImpl<double>( GPUDevice *device, const double *x, double *dx, const Shape &xshape, const double *y, const Shape &yshape, const double *z, const double *dz, const Shape &zshape) { double alpha = 1; double beta = 1; if (1 == yshape.batch) { int b = xshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); CUBLAS_CHECK(hipblasDgemm(device->cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, k, b * m, n, &alpha, y, n, dz, n, &beta, dx, k)); } else if (1 == xshape.batch && 1 == yshape.col()) { int m = xshape.row(); int k = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(hipblasDgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, k, m, b, &alpha, y, k, dz, m, &beta, dx, k)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto dxptr = dx + (b % xshape.batch) * xshape.batchSize(); auto yptr = y + (b % yshape.batch) * yshape.batchSize(); auto dzptr = dz + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(hipblasDgemm(device->cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, k, m, n, &alpha, yptr, n, dzptr, n, 
&beta, dxptr, k)); } } } #ifdef HAVE_HALF template <> void MatrixMultiplyGradXGPUImpl<half>( GPUDevice *device, const half *x, half *dx, const Shape &xshape, const half *y, const Shape &yshape, const half *z, const half *dz, const Shape &zshape) { half alpha = 1.0; half beta = 1.0; if (1 == yshape.batch) { int b = xshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); CUBLAS_CHECK(hipblasHgemm(device->cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, k, b * m, n, &alpha, y, n, dz, n, &beta, dx, k)); } else if (1 == xshape.batch && 1 == yshape.col()) { int m = xshape.row(); int k = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(hipblasHgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, k, m, b, &alpha, y, k, dz, m, &beta, dx, k)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto dxptr = dx + (b % xshape.batch) * xshape.batchSize(); auto yptr = y + (b % yshape.batch) * yshape.batchSize(); auto dzptr = dz + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(hipblasHgemm(device->cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, k, m, n, &alpha, yptr, n, dzptr, n, &beta, dxptr, k)); } } } #endif void MatrixMultiplyGradXGPU(const Tensor &x, Tensor &dx, const Tensor &y, const Tensor &z, const Tensor &dz) { auto device = (GPUDevice*)x.device(); switch (x.elementType.id) { case DType::Float32: MatrixMultiplyGradXGPUImpl<float>(device, x.data<float>(), dx.data<float>(), x.shape, y.data<float>(), y.shape, z.data<float>(), dz.data<float>(), z.shape); break; case DType::Float64: MatrixMultiplyGradXGPUImpl<double>(device, x.data<double>(), dx.data<double>(), x.shape, y.data<double>(), y.shape, z.data<double>(), dz.data<double>(), z.shape); break; #ifdef HAVE_HALF case DType::Float16: MatrixMultiplyGradXGPUImpl<half>(device, x.data<half>(), dx.data<half>(), x.shape, y.data<half>(), y.shape, z.data<half>(), dz.data<half>(), z.shape); break; #endif default: DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support"); break; } } /**gradient for Y*/ template <typename T> void MatrixMultiplyGradYGPUImpl(GPUDevice *device, const T *x, const Shape &xshape, const T *y, T *dy, const Shape &yshape, const T *z, const T *dz, const Shape &zshape) { DEEP8_RUNTIME_ERROR("the type in not support"); } template <> void MatrixMultiplyGradYGPUImpl<float>( GPUDevice *device, const float *x, const Shape &xshape, const float *y, float *dy, const Shape &yshape, const float *z, const float *dz, const Shape &zshape) { float alpha = 1; float beta = 1; if (1 == yshape.batch) { int b = xshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); CUBLAS_CHECK(hipblasSgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, n, k, m * b, &alpha, dz, n, x, k, &beta, dy, n)); } else if (1 == xshape.batch && 1 == yshape.col()) { int m = xshape.row(); int k = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(hipblasSgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, k, b, m, &alpha, x, k, dz, m, &beta, dy, k)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto xptr = x + (b % xshape.batch) * xshape.batchSize(); auto dyptr = dy + (b % yshape.batch) * yshape.batchSize(); auto dzptr = dz + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(hipblasSgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, n, k, m, &alpha, dzptr, n, xptr, k, &beta, dyptr, n)); } } } template <> void MatrixMultiplyGradYGPUImpl<double>( 
GPUDevice *device, const double *x, const Shape &xshape, const double *y, double *dy, const Shape &yshape, const double *z, const double *dz, const Shape &zshape) { double alpha = 1; double beta = 1; if (1 == yshape.batch) { int b = xshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); CUBLAS_CHECK(hipblasDgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, n, k, m * b, &alpha, dz, n, x, k, &beta, dy, n)); } else if (1 == xshape.batch && 1 == yshape.col()) { int m = xshape.row(); int k = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(hipblasDgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, k, b, m, &alpha, x, k, dz, m, &beta, dy, k)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto xptr = x + (b % xshape.batch) * xshape.batchSize(); auto dyptr = dy + (b % yshape.batch) * yshape.batchSize(); auto dzptr = dz + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(hipblasDgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, n, k, m, &alpha, dzptr, n, xptr, k, &beta, dyptr, n)); } } } #ifdef HAVE_HALF template <> void MatrixMultiplyGradYGPUImpl<half>( GPUDevice *device, const half *x, const Shape &xshape, const half *y, half *dy, const Shape &yshape, const half *z, const half *dz, const Shape &zshape) { half alpha = 1.0; half beta = 1.0; if (1 == yshape.batch) { int b = (int)xshape.batch; int m = (int)xshape.row(); int k = (int)xshape.col(); int n = (int)yshape.col(); CUBLAS_CHECK(hipblasHgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, n, k, m * b, &alpha, dz, n, x, k, &beta, dy, n)); } else if (1 == xshape.batch && 1 == yshape.col()) { int m = (int)xshape.row(); int k = (int)xshape.col(); int b = (int)yshape.batch; CUBLAS_CHECK(hipblasHgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, k, b, m, &alpha, x, k, dz, m, &beta, dy, k)); } else { int batch = zshape.batch; int m = (int)xshape.row(); int k = (int)xshape.col(); int n = (int)yshape.col(); for (int b = 0; b < batch; ++b) { auto xptr = x + (b % xshape.batch) * xshape.batchSize(); auto dyptr = dy + (b % yshape.batch) * yshape.batchSize(); auto dzptr = dz + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(hipblasHgemm(device->cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, n, k, m, &alpha, dzptr, n, xptr, k, &beta, dyptr, n)); } } } #endif void MatrixMultiplyGradYGPU(const Tensor &x, const Tensor &y, Tensor &dy, const Tensor &z, const Tensor &dz) { auto device = (GPUDevice*)x.device(); switch (x.elementType.id) { case DType::Float32: MatrixMultiplyGradYGPUImpl<float>(device, x.data<float>(), x.shape, y.data<float>(), dy.data<float>(), y.shape, z.data<float>(), dz.data<float>(), z.shape); break; case DType::Float64: MatrixMultiplyGradYGPUImpl<double>(device, x.data<double>(), x.shape, y.data<double>(), dy.data<double>(), y.shape, z.data<double>(), dz.data<double>(), z.shape); break; #ifdef HAVE_HALF case DType::Float16: MatrixMultiplyGradYGPUImpl<half>(device, x.data<half>(), x.shape, y.data<half>(), dy.data<half>(), y.shape, z.data<half>(), dz.data<half>(), z.shape); break; #endif default: DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support"); break; } } } }
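// ----------------------------------------------------------------------------
// Self-contained illustration (assumed example, not part of the original file)
// of the operand-swap convention behind every gemm call above and in the CUDA
// original that follows. The tensors are stored row-major while hipBLAS/cuBLAS
// assume column-major, so Z = X * Y is issued as Z^T = Y^T * X^T, i.e. the
// "B" operand (Y) is passed first with leading dimension n:
//   gemm(handle, OP_N, OP_N, n, m, k, &alpha, Y, n, X, k, &beta, Z, n)
// The gradient kernels apply the same trick with one operand transposed
// (dX accumulates dZ * Y^T, dY accumulates X^T * dZ). Sizes and data below are
// arbitrary test values.
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

int main() {
  const int m = 2, k = 3, n = 4;  // X is m x k, Y is k x n, Z is m x n, all row-major
  std::vector<float> X(m * k, 1.0f), Y(k * n, 2.0f), Z(m * n, 0.0f);

  float *dX, *dY, *dZ;
  cudaMalloc(&dX, X.size() * sizeof(float));
  cudaMalloc(&dY, Y.size() * sizeof(float));
  cudaMalloc(&dZ, Z.size() * sizeof(float));
  cudaMemcpy(dX, X.data(), X.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dY, Y.data(), Y.size() * sizeof(float), cudaMemcpyHostToDevice);

  cublasHandle_t handle;
  cublasCreate(&handle);
  const float alpha = 1.0f, beta = 0.0f;

  // Column-major view: Y^T (n x k) times X^T (k x m) gives Z^T (n x m), whose
  // memory layout is exactly the row-major m x n matrix Z we want.
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k,
              &alpha, dY, n, dX, k, &beta, dZ, n);

  cudaMemcpy(Z.data(), dZ, Z.size() * sizeof(float), cudaMemcpyDeviceToHost);
  std::printf("Z[0][0] = %f (expected %f)\n", Z[0], 2.0f * k);  // 1 * 2 summed over k = 6

  cublasDestroy(handle);
  cudaFree(dX);
  cudaFree(dY);
  cudaFree(dZ);
  return 0;
}
// ----------------------------------------------------------------------------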
a6369787d7c3778411fd903a4f182fc1065eee02.cu
#include "basic/GPUBasic.h" #include "model/GPUDevice.h" #include "math/GPUMath.h" #include "math/MatrixMultiply.h" namespace Deep8 { namespace Math { template <typename T> void MatrixMultiplyGPUImpl(GPUDevice* device, const T* x, const Shape& xshape, const T* y, const Shape& yshape, T* z, const Shape& zshape) { DEEP8_RUNTIME_ERROR("the type in not support"); } template <> void MatrixMultiplyGPUImpl<float>( GPUDevice* device, const float* x, const Shape& xshape, const float* y, const Shape& yshape, float* z, const Shape& zshape) { float alpha = 1; float beta = 0; if (1 == yshape.batch) { int m = xshape.batch * xshape.row(); int n = yshape.col(); int k = xshape.col(); CUBLAS_CHECK(cublasSgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, y, n, x, k, &beta, z, n)); } else if (1 == xshape.batch && 1 == yshape.col()) { int row = xshape.row(); int col = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(cublasSgemm(device->cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, row, b, col, &alpha, x, col, y, col, &beta, z, row)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto xptr = x + (b % xshape.batch) * xshape.batchSize(); auto yptr = y + (b % yshape.batch) * yshape.batchSize(); auto zptr = z + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(cublasSgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, yptr, n, xptr, k, &beta, zptr, n)); } } } template <> void MatrixMultiplyGPUImpl<double>(GPUDevice* device, const double* x, const Shape& xshape, const double* y, const Shape& yshape, double* z, const Shape& zshape) { double alpha = 1; double beta = 0; if (1 == yshape.batch) { int m = xshape.batch * xshape.row(); int n = yshape.col(); int k = xshape.col(); CUBLAS_CHECK(cublasDgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, y, n, x, k, &beta, z, n)); } else if (1 == xshape.batch && 1 == yshape.col()) { int row = xshape.row(); int col = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(cublasDgemm(device->cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, row, b, col, &alpha, x, col, y, col, &beta, z, row)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto xptr = x + (b % xshape.batch) * xshape.batchSize(); auto yptr = y + (b % yshape.batch) * yshape.batchSize(); auto zptr = z + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(cublasDgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, yptr, n, xptr, k, &beta, zptr, n)); } } } #ifdef HAVE_HALF template <> void MatrixMultiplyGPUImpl<half>(GPUDevice* device, const half* x, const Shape& xshape, const half* y, const Shape& yshape, half* z, const Shape& zshape) { half alpha = 1.0; half beta = 0.0; if (1 == yshape.batch) { int m = xshape.batch * xshape.row(); int n = yshape.col(); int k = xshape.col(); CUBLAS_CHECK(cublasHgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, y, n, x, k, &beta, z, n)); } else if (1 == xshape.batch && 1 == yshape.col()) { int row = xshape.row(); int col = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(cublasHgemm(device->cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, row, b, col, &alpha, x, col, y, col, &beta, z, row)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto xptr = x + (b % xshape.batch) * xshape.batchSize(); auto yptr = y + (b % yshape.batch) * yshape.batchSize(); auto zptr 
= z + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(cublasHgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, yptr, n, xptr, k, &beta, zptr, n)); } } } #endif void MatrixMultiplyGPU(const Tensor &x, const Tensor &y, Tensor &z) { auto device = (GPUDevice*)x.device(); switch (x.elementType.id) { case DType::Float32: MatrixMultiplyGPUImpl<float>(device, x.data<float>(), x.shape, y.data<float>(), y.shape, z.data<float>(), z.shape); break; case DType::Float64: MatrixMultiplyGPUImpl<double>(device, x.data<double>(), x.shape, y.data<double>(), y.shape, z.data<double>(), z.shape); break; #ifdef HAVE_HALF case DType::Float16: MatrixMultiplyGPUImpl<half>(device, x.data<half>(), x.shape, y.data<half>(), y.shape, z.data<half>(), z.shape); break; #endif default: DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support"); break; } } /**gradient for X*/ template <typename T> void MatrixMultiplyGradXGPUImpl(GPUDevice *device, const T *x, T *dx, const Shape &xshape, const T *y, const Shape &yshape, const T *z, const T *dz, const Shape &zshape) { DEEP8_RUNTIME_ERROR("the type in not support"); } template <> void MatrixMultiplyGradXGPUImpl<float>( GPUDevice *device, const float *x, float *dx, const Shape &xshape, const float *y, const Shape &yshape, const float *z, const float *dz, const Shape &zshape) { float alpha = 1; float beta = 1; if (1 == yshape.batch) { int b = xshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); CUBLAS_CHECK(cublasSgemm(device->cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, k, b * m, n, &alpha, y, n, dz, n, &beta, dx, k)); } else if (1 == xshape.batch && 1 == yshape.col()) { int m = xshape.row(); int k = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(cublasSgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, k, m, b, &alpha, y, k, dz, m, &beta, dx, k)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto dxptr = dx + (b % xshape.batch) * xshape.batchSize(); auto yptr = y + (b % yshape.batch) * yshape.batchSize(); auto dzptr = dz + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(cublasSgemm(device->cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, k, m, n, &alpha, yptr, n, dzptr, n, &beta, dxptr, k)); } } } template <> void MatrixMultiplyGradXGPUImpl<double>( GPUDevice *device, const double *x, double *dx, const Shape &xshape, const double *y, const Shape &yshape, const double *z, const double *dz, const Shape &zshape) { double alpha = 1; double beta = 1; if (1 == yshape.batch) { int b = xshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); CUBLAS_CHECK(cublasDgemm(device->cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, k, b * m, n, &alpha, y, n, dz, n, &beta, dx, k)); } else if (1 == xshape.batch && 1 == yshape.col()) { int m = xshape.row(); int k = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(cublasDgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, k, m, b, &alpha, y, k, dz, m, &beta, dx, k)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto dxptr = dx + (b % xshape.batch) * xshape.batchSize(); auto yptr = y + (b % yshape.batch) * yshape.batchSize(); auto dzptr = dz + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(cublasDgemm(device->cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, k, m, n, &alpha, yptr, n, dzptr, n, &beta, dxptr, k)); } } } #ifdef HAVE_HALF template <> void MatrixMultiplyGradXGPUImpl<half>( GPUDevice 
*device, const half *x, half *dx, const Shape &xshape, const half *y, const Shape &yshape, const half *z, const half *dz, const Shape &zshape) { half alpha = 1.0; half beta = 1.0; if (1 == yshape.batch) { int b = xshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); CUBLAS_CHECK(cublasHgemm(device->cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, k, b * m, n, &alpha, y, n, dz, n, &beta, dx, k)); } else if (1 == xshape.batch && 1 == yshape.col()) { int m = xshape.row(); int k = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(cublasHgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, k, m, b, &alpha, y, k, dz, m, &beta, dx, k)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto dxptr = dx + (b % xshape.batch) * xshape.batchSize(); auto yptr = y + (b % yshape.batch) * yshape.batchSize(); auto dzptr = dz + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(cublasHgemm(device->cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, k, m, n, &alpha, yptr, n, dzptr, n, &beta, dxptr, k)); } } } #endif void MatrixMultiplyGradXGPU(const Tensor &x, Tensor &dx, const Tensor &y, const Tensor &z, const Tensor &dz) { auto device = (GPUDevice*)x.device(); switch (x.elementType.id) { case DType::Float32: MatrixMultiplyGradXGPUImpl<float>(device, x.data<float>(), dx.data<float>(), x.shape, y.data<float>(), y.shape, z.data<float>(), dz.data<float>(), z.shape); break; case DType::Float64: MatrixMultiplyGradXGPUImpl<double>(device, x.data<double>(), dx.data<double>(), x.shape, y.data<double>(), y.shape, z.data<double>(), dz.data<double>(), z.shape); break; #ifdef HAVE_HALF case DType::Float16: MatrixMultiplyGradXGPUImpl<half>(device, x.data<half>(), dx.data<half>(), x.shape, y.data<half>(), y.shape, z.data<half>(), dz.data<half>(), z.shape); break; #endif default: DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support"); break; } } /**gradient for Y*/ template <typename T> void MatrixMultiplyGradYGPUImpl(GPUDevice *device, const T *x, const Shape &xshape, const T *y, T *dy, const Shape &yshape, const T *z, const T *dz, const Shape &zshape) { DEEP8_RUNTIME_ERROR("the type in not support"); } template <> void MatrixMultiplyGradYGPUImpl<float>( GPUDevice *device, const float *x, const Shape &xshape, const float *y, float *dy, const Shape &yshape, const float *z, const float *dz, const Shape &zshape) { float alpha = 1; float beta = 1; if (1 == yshape.batch) { int b = xshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); CUBLAS_CHECK(cublasSgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, n, k, m * b, &alpha, dz, n, x, k, &beta, dy, n)); } else if (1 == xshape.batch && 1 == yshape.col()) { int m = xshape.row(); int k = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(cublasSgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, k, b, m, &alpha, x, k, dz, m, &beta, dy, k)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto xptr = x + (b % xshape.batch) * xshape.batchSize(); auto dyptr = dy + (b % yshape.batch) * yshape.batchSize(); auto dzptr = dz + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(cublasSgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, n, k, m, &alpha, dzptr, n, xptr, k, &beta, dyptr, n)); } } } template <> void MatrixMultiplyGradYGPUImpl<double>( GPUDevice *device, const double *x, const Shape &xshape, const double *y, double *dy, const Shape &yshape, const double 
*z, const double *dz, const Shape &zshape) { double alpha = 1; double beta = 1; if (1 == yshape.batch) { int b = xshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); CUBLAS_CHECK(cublasDgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, n, k, m * b, &alpha, dz, n, x, k, &beta, dy, n)); } else if (1 == xshape.batch && 1 == yshape.col()) { int m = xshape.row(); int k = xshape.col(); int b = yshape.batch; CUBLAS_CHECK(cublasDgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, k, b, m, &alpha, x, k, dz, m, &beta, dy, k)); } else { int batch = zshape.batch; int m = xshape.row(); int k = xshape.col(); int n = yshape.col(); for (int b = 0; b < batch; ++b) { auto xptr = x + (b % xshape.batch) * xshape.batchSize(); auto dyptr = dy + (b % yshape.batch) * yshape.batchSize(); auto dzptr = dz + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(cublasDgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, n, k, m, &alpha, dzptr, n, xptr, k, &beta, dyptr, n)); } } } #ifdef HAVE_HALF template <> void MatrixMultiplyGradYGPUImpl<half>( GPUDevice *device, const half *x, const Shape &xshape, const half *y, half *dy, const Shape &yshape, const half *z, const half *dz, const Shape &zshape) { half alpha = 1.0; half beta = 1.0; if (1 == yshape.batch) { int b = (int)xshape.batch; int m = (int)xshape.row(); int k = (int)xshape.col(); int n = (int)yshape.col(); CUBLAS_CHECK(cublasHgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, n, k, m * b, &alpha, dz, n, x, k, &beta, dy, n)); } else if (1 == xshape.batch && 1 == yshape.col()) { int m = (int)xshape.row(); int k = (int)xshape.col(); int b = (int)yshape.batch; CUBLAS_CHECK(cublasHgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, k, b, m, &alpha, x, k, dz, m, &beta, dy, k)); } else { int batch = zshape.batch; int m = (int)xshape.row(); int k = (int)xshape.col(); int n = (int)yshape.col(); for (int b = 0; b < batch; ++b) { auto xptr = x + (b % xshape.batch) * xshape.batchSize(); auto dyptr = dy + (b % yshape.batch) * yshape.batchSize(); auto dzptr = dz + (b % zshape.batch) * zshape.batchSize(); CUBLAS_CHECK(cublasHgemm(device->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, n, k, m, &alpha, dzptr, n, xptr, k, &beta, dyptr, n)); } } } #endif void MatrixMultiplyGradYGPU(const Tensor &x, const Tensor &y, Tensor &dy, const Tensor &z, const Tensor &dz) { auto device = (GPUDevice*)x.device(); switch (x.elementType.id) { case DType::Float32: MatrixMultiplyGradYGPUImpl<float>(device, x.data<float>(), x.shape, y.data<float>(), dy.data<float>(), y.shape, z.data<float>(), dz.data<float>(), z.shape); break; case DType::Float64: MatrixMultiplyGradYGPUImpl<double>(device, x.data<double>(), x.shape, y.data<double>(), dy.data<double>(), y.shape, z.data<double>(), dz.data<double>(), z.shape); break; #ifdef HAVE_HALF case DType::Float16: MatrixMultiplyGradYGPUImpl<half>(device, x.data<half>(), x.shape, y.data<half>(), dy.data<half>(), y.shape, z.data<half>(), dz.data<half>(), z.shape); break; #endif default: DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support"); break; } } } }
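The batched branches above broadcast x, y, and z across the batch dimension by offsetting each pointer with (b % shape.batch) * shape.batchSize() before every cuBLAS call, and they realize a row-major product by swapping the operands in the column-major GEMM. Below is a minimal, self-contained sketch of that broadcast loop with plain cuBLAS; the fixed sizes, the handle setup, and the omission of data initialization and error checking are illustrative assumptions, not Deep8's actual API.

// Standalone sketch: batched row-major C[b] = A[b % batchA] * B[b % batchB],
// done with column-major cuBLAS by computing C^T = B^T * A^T (swap the operands).
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    const int m = 2, k = 3, n = 2;                 // row-major A: m x k, B: k x n, C: m x n
    const int batchA = 1, batchB = 4, batchC = 4;  // A is broadcast across the batch

    float *dA, *dB, *dC;
    cudaMalloc(&dA, batchA * m * k * sizeof(float));
    cudaMalloc(&dB, batchB * k * n * sizeof(float));
    cudaMalloc(&dC, batchC * m * n * sizeof(float));
    // (a real program would fill dA and dB with data here)

    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.0f, beta = 0.0f;

    for (int b = 0; b < batchC; ++b) {
        const float *a  = dA + (b % batchA) * m * k;  // broadcast when batchA == 1
        const float *bp = dB + (b % batchB) * k * n;
        float       *c  = dC + (b % batchC) * m * n;
        // Row-major C = A*B is the column-major product B^T * A^T, hence (n, m, k).
        cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k,
                    &alpha, bp, n, a, k, &beta, c, n);
    }

    cublasDestroy(handle);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    printf("done\n");
    return 0;
}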
059fb4b6c0679a3c8fe840700d1879ec2cdfc4b4.hip
// !!! This is a file automatically generated by hipify!!! #include "..\Prerequisites.h" void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { char const * const errId = "GTOM:Projection:ForwardProj:InvalidInput"; mxInitGPU(); if (nrhs < 2) mexErrMsgIdAndTxt(errId, "Not enough parameters (2 or 3 expected)."); mxArrayAdapter volume(prhs[0]); int ndims = mxGetNumberOfDimensions(volume.underlyingarray); int3 dimsvolume = MWDimsToInt3(ndims, mxGetDimensions(volume.underlyingarray)); if (dimsvolume.x != dimsvolume.y || dimsvolume.x != dimsvolume.z) mexErrMsgIdAndTxt(errId, "Volume must be a cube."); tfloat* d_volume = volume.GetAsManagedDeviceTFloat(); mxArrayAdapter angles(prhs[1]); ndims = mxGetNumberOfDimensions(angles.underlyingarray); int3 dimsangles = MWDimsToInt3(ndims, mxGetDimensions(angles.underlyingarray)); if (dimsangles.x != 3) mexErrMsgIdAndTxt(errId, "3 values per column expected for angles."); tfloat3* h_angles = (tfloat3*)angles.GetAsManagedTFloat(); short kernelsize = 10; if (nrhs == 3) { mxArrayAdapter kernel(prhs[2]); tfloat* h_kernel = kernel.GetAsManagedTFloat(); kernelsize = (short)(h_kernel[0] + 0.5f); } if (kernelsize < 0 || kernelsize > dimsvolume.x / 2) mexErrMsgIdAndTxt(errId, "Kernel size should be between 0 and half of the volume side length."); int3 dimsproj = toInt3(dimsvolume.x, dimsvolume.x, 1); tfloat* d_proj; hipMalloc((void**)&d_proj, Elements(dimsproj) * dimsangles.y * sizeof(tfloat)); d_RemapFullFFT2Full(d_volume, d_volume, dimsvolume); d_ProjForward(d_volume, dimsvolume, d_proj, dimsproj, h_angles, kernelsize, dimsangles.y); d_RemapFull2FullFFT(d_proj, d_proj, dimsproj, dimsangles.y); mwSize outputdims[3]; outputdims[0] = dimsproj.x; outputdims[1] = dimsproj.y; outputdims[2] = dimsangles.y; mxArrayAdapter A(mxCreateNumericArray(3, outputdims, mxGetClassID(volume.underlyingarray), mxREAL)); A.SetFromDeviceTFloat(d_proj); hipFree(d_proj); plhs[0] = A.underlyingarray; }
059fb4b6c0679a3c8fe840700d1879ec2cdfc4b4.cu
#include "..\Prerequisites.h" void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { char const * const errId = "GTOM:Projection:ForwardProj:InvalidInput"; mxInitGPU(); if (nrhs < 2) mexErrMsgIdAndTxt(errId, "Not enough parameters (2 or 3 expected)."); mxArrayAdapter volume(prhs[0]); int ndims = mxGetNumberOfDimensions(volume.underlyingarray); int3 dimsvolume = MWDimsToInt3(ndims, mxGetDimensions(volume.underlyingarray)); if (dimsvolume.x != dimsvolume.y || dimsvolume.x != dimsvolume.z) mexErrMsgIdAndTxt(errId, "Volume must be a cube."); tfloat* d_volume = volume.GetAsManagedDeviceTFloat(); mxArrayAdapter angles(prhs[1]); ndims = mxGetNumberOfDimensions(angles.underlyingarray); int3 dimsangles = MWDimsToInt3(ndims, mxGetDimensions(angles.underlyingarray)); if (dimsangles.x != 3) mexErrMsgIdAndTxt(errId, "3 values per column expected for angles."); tfloat3* h_angles = (tfloat3*)angles.GetAsManagedTFloat(); short kernelsize = 10; if (nrhs == 3) { mxArrayAdapter kernel(prhs[2]); tfloat* h_kernel = kernel.GetAsManagedTFloat(); kernelsize = (short)(h_kernel[0] + 0.5f); } if (kernelsize < 0 || kernelsize > dimsvolume.x / 2) mexErrMsgIdAndTxt(errId, "Kernel size should be between 0 and half of the volume side length."); int3 dimsproj = toInt3(dimsvolume.x, dimsvolume.x, 1); tfloat* d_proj; cudaMalloc((void**)&d_proj, Elements(dimsproj) * dimsangles.y * sizeof(tfloat)); d_RemapFullFFT2Full(d_volume, d_volume, dimsvolume); d_ProjForward(d_volume, dimsvolume, d_proj, dimsproj, h_angles, kernelsize, dimsangles.y); d_RemapFull2FullFFT(d_proj, d_proj, dimsproj, dimsangles.y); mwSize outputdims[3]; outputdims[0] = dimsproj.x; outputdims[1] = dimsproj.y; outputdims[2] = dimsangles.y; mxArrayAdapter A(mxCreateNumericArray(3, outputdims, mxGetClassID(volume.underlyingarray), mxREAL)); A.SetFromDeviceTFloat(d_proj); cudaFree(d_proj); plhs[0] = A.underlyingarray; }
87ecbd9a2e7c4b0ac8fa6a5c9ee0c52483896168.hip
// !!! This is a file automatically generated by hipify!!! //histogram processing #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> hipError_t findFreqWithCuda(int *c, unsigned int fsize, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *b) { int i = threadIdx.x; #if __CUDA_ARCH__ >= 200 atomicAdd(&c[b[i]],1); #endif } int main() { const int arraySize = 11; const int fsize = 10005;//maximum value of the number in the array can be at max 10005 const int a[arraySize] = { 1, 2, 3, 4, 5,1,1,1,2,3,6 }; int maxx = INT_MIN; for (int i = 0; i < arraySize; i++) { //maxx = max(maxx, arr[i]); if (maxx < a[i]) maxx=a[i]; } int c[fsize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = findFreqWithCuda(c,fsize,a,arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "findFreqWithCuda failed!"); return 1; } for (int i = 0; i <= maxx; i++) { if (c[i] != 0) { printf("%d occurs %d times\n", i, c[i]); } } // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t findFreqWithCuda(int *c, unsigned int fsize, const int *b, unsigned int size) { // int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, fsize * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. //dim3 threadsPerBlock(11,11,11); hipLaunchKernelGGL(( addKernel), dim3(1),dim3(size), 0, 0, dev_c,dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, fsize * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_b); return cudaStatus; }
87ecbd9a2e7c4b0ac8fa6a5c9ee0c52483896168.cu
//histogram processing #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> cudaError_t findFreqWithCuda(int *c, unsigned int fsize, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *b) { int i = threadIdx.x; #if __CUDA_ARCH__ >= 200 atomicAdd(&c[b[i]],1); #endif } int main() { const int arraySize = 11; const int fsize = 10005;//maximum value of the number in the array can be at max 10005 const int a[arraySize] = { 1, 2, 3, 4, 5,1,1,1,2,3,6 }; int maxx = INT_MIN; for (int i = 0; i < arraySize; i++) { //maxx = max(maxx, arr[i]); if (maxx < a[i]) maxx=a[i]; } int c[fsize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = findFreqWithCuda(c,fsize,a,arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "findFreqWithCuda failed!"); return 1; } for (int i = 0; i <= maxx; i++) { if (c[i] != 0) { printf("%d occurs %d times\n", i, c[i]); } } // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t findFreqWithCuda(int *c, unsigned int fsize, const int *b, unsigned int size) { // int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, fsize * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. //dim3 threadsPerBlock(11,11,11); addKernel<<<1,size>>>(dev_c,dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, fsize * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_b); return cudaStatus; }
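The kernel in this pair is launched as addKernel<<<1, size>>>, a single block with one thread per element, so it only handles arrays up to the maximum block size. A grid-stride variant of the same atomicAdd approach, shown here as a sketch rather than a change to the original pair, removes that limit:

// Hypothetical grid-stride histogram kernel: works for any array size and launch shape.
__global__ void histKernel(int *counts, const int *values, int n) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += gridDim.x * blockDim.x) {
        atomicAdd(&counts[values[i]], 1);  // assumes values[i] is a valid bin index
    }
}

// Example launch (assuming d_counts and d_values are device buffers of the right sizes):
//   histKernel<<<(n + 255) / 256, 256>>>(d_counts, d_values, n);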
138f2ca400513be2409d02462985e8f8da5a3ca2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "rotate.h" __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } // Helper function for using CUDA to add vectors in parallel. hipError_t rotateWithCuda(std::vector<glm::vec3> *vertexIn, std::map<int, std::vector<glm::vec3>> *vertexOut, glm::vec3 rotation) { hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . /*cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; }*/ // Copy input vectors from host memory to GPU buffers. /*cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; }*/ // Launch a kernel on the GPU with one thread for each element. //addKernel <<<1, size >>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. /*cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; }*/ //GOTO statement for free any code during an error state Error: //hipFree(dev_c); return cudaStatus; }
138f2ca400513be2409d02462985e8f8da5a3ca2.cu
#include "rotate.h" __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } // Helper function for using CUDA to add vectors in parallel. cudaError_t rotateWithCuda(std::vector<glm::vec3> *vertexIn, std::map<int, std::vector<glm::vec3>> *vertexOut, glm::vec3 rotation) { cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . /*cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }*/ // Copy input vectors from host memory to GPU buffers. /*cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }*/ // Launch a kernel on the GPU with one thread for each element. //addKernel <<<1, size >>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. /*cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }*/ //GOTO statement for free any code during an error state Error: //cudaFree(dev_c); return cudaStatus; }
a7becb86a590fe15cc2cf3e9f8bf985d1ae03736.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2019 by Contributors
#include <gtest/gtest.h>
#include <xgboost/data.h>
#include "../../../src/data/adapter.h"
#include "../../../src/data/simple_dmatrix.h"
#include "../../../src/common/timer.h"
#include "../helpers.h"

#include <thrust/device_vector.h>
#include "../../../src/data/device_adapter.cuh"
#include "test_array_interface.h"

using namespace xgboost;  // NOLINT

void TestCudfAdapter() {
  constexpr size_t kRowsA {16};
  constexpr size_t kRowsB {16};
  std::vector<Json> columns;
  thrust::device_vector<double> d_data_0(kRowsA);
  thrust::device_vector<uint32_t> d_data_1(kRowsB);

  columns.emplace_back(GenerateDenseColumn<double>("<f8", kRowsA, &d_data_0));
  columns.emplace_back(GenerateDenseColumn<uint32_t>("<u4", kRowsB, &d_data_1));

  Json column_arr {columns};

  std::string str;
  Json::Dump(column_arr, &str);

  data::CudfAdapter adapter(str);
  adapter.Next();
  auto & batch = adapter.Value();
  EXPECT_EQ(batch.Size(), kRowsA + kRowsB);

  EXPECT_NO_THROW({
    dh::LaunchN(0, batch.Size(), [=] __device__(size_t idx) {
      auto element = batch.GetElement(idx);
      KERNEL_CHECK(element.row_idx == idx / 2);
      if (idx % 2 == 0) {
        KERNEL_CHECK(element.column_idx == 0);
        KERNEL_CHECK(element.value == element.row_idx * 2.0f);
      } else {
        KERNEL_CHECK(element.column_idx == 1);
        KERNEL_CHECK(element.value == element.row_idx * 2.0f);
      }
    });
    dh::safe_cuda(hipDeviceSynchronize());
  });
}

TEST(DeviceAdapter, CudfAdapter) {
  TestCudfAdapter();
}
a7becb86a590fe15cc2cf3e9f8bf985d1ae03736.cu
// Copyright (c) 2019 by Contributors
#include <gtest/gtest.h>
#include <xgboost/data.h>
#include "../../../src/data/adapter.h"
#include "../../../src/data/simple_dmatrix.h"
#include "../../../src/common/timer.h"
#include "../helpers.h"

#include <thrust/device_vector.h>
#include "../../../src/data/device_adapter.cuh"
#include "test_array_interface.h"

using namespace xgboost;  // NOLINT

void TestCudfAdapter() {
  constexpr size_t kRowsA {16};
  constexpr size_t kRowsB {16};
  std::vector<Json> columns;
  thrust::device_vector<double> d_data_0(kRowsA);
  thrust::device_vector<uint32_t> d_data_1(kRowsB);

  columns.emplace_back(GenerateDenseColumn<double>("<f8", kRowsA, &d_data_0));
  columns.emplace_back(GenerateDenseColumn<uint32_t>("<u4", kRowsB, &d_data_1));

  Json column_arr {columns};

  std::string str;
  Json::Dump(column_arr, &str);

  data::CudfAdapter adapter(str);
  adapter.Next();
  auto & batch = adapter.Value();
  EXPECT_EQ(batch.Size(), kRowsA + kRowsB);

  EXPECT_NO_THROW({
    dh::LaunchN(0, batch.Size(), [=] __device__(size_t idx) {
      auto element = batch.GetElement(idx);
      KERNEL_CHECK(element.row_idx == idx / 2);
      if (idx % 2 == 0) {
        KERNEL_CHECK(element.column_idx == 0);
        KERNEL_CHECK(element.value == element.row_idx * 2.0f);
      } else {
        KERNEL_CHECK(element.column_idx == 1);
        KERNEL_CHECK(element.value == element.row_idx * 2.0f);
      }
    });
    dh::safe_cuda(cudaDeviceSynchronize());
  });
}

TEST(DeviceAdapter, CudfAdapter) {
  TestCudfAdapter();
}

16f88d141e3a4dad3d8525572b6773ee6eebfb9f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMath.cu" #else THC_API void THCTensor_(fill)(THCState* state, THCTensor *self_, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (!THC_pointwiseApply1<real>( state, self_, TensorFillOp<real>(value))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(zero)(THCState *state, THCTensor *self_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (THCTensor_(isContiguous)(state, self_)) { THCudaCheck(hipMemsetAsync(THCTensor_(data)(state, self_), 0, sizeof(real) * THCTensor_(nElement)(state, self_), THCState_getCurrentStream(state))); } else { if (!THC_pointwiseApply1<real>( state, self_, TensorFillOp<real>(ScalarConvert<int, real>::to(0)))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor *input) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input)); THCTensor_(resizeAs)(state, r_, input); THCTensor_(zero)(state, r_); } THC_API void THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor *input) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input)); THCTensor_(resizeAs)(state, r_, input); THCTensor_(fill)(state, r_, ScalarConvert<int, real>::to(1)); } ptrdiff_t THCTensor_(numel)(THCState *state, THCTensor *t) { return THCTensor_(nElement)(state, t); } void THCTensor_(cat)(THCState *state, THCTensor *result, THCTensor *ta, THCTensor *tb, int dimension) { THCTensor* inputs[2]; inputs[0] = ta; inputs[1] = tb; THCTensor_(catArray)(state, result, inputs, 2, dimension); } void THCTensor_(check_shape_except_dim)(THCState *state, THCTensor *first, THCTensor *second, int dimension); inline void THCTensor_(check_shape_except_dim)(THCState *state, THCTensor *first, THCTensor *second, int dimension) { int first_dims = first->dim(); int second_dims = second->dim(); THArgCheck(first_dims == second_dims, 0, "Tensors must have same number of dimensions: got %d and %d", first_dims, second_dims); for (int dim = 0; dim < first_dims; dim++) { if (dim == dimension) { continue; } int64_t first_dim_size = THCTensor_(size)(state, first, dim); int64_t second_dim_size = THCTensor_(size)(state, second, dim); THArgCheck(first_dim_size == second_dim_size, 0, "Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d", dimension, (long long)first_dim_size, (long long)second_dim_size, dim); } } void THCTensor_(catArray)(THCState *state, THCTensor *result, THCTensor **inputs, int numInputs, int dimension) { // previously, size [0] tensors were the only possible empty tensors; thus, it wasn't possible // to cat empty tensors unless all the other tensors were 1-dimensional, so we allowed these tensors // to be "skipped". We maintain this behavior for backwards compatibility, but only for this specific // size (i.e. other empty sizes are not skipped). 
// FIXME: warn if this is the case THLongStorage *size; int i, j, cohortMax; int64_t offset; bool hasSkippedInput = false; THCTensor *notSkippedTensor = NULL; // non-owning reference auto should_skip = [](THCTensor *t) { return t->is_empty() && t->dim() == 1; }; int nDims = 0; for (i = 0; i < numInputs; i++) { if (should_skip(inputs[i])) { hasSkippedInput = true; continue; } nDims = inputs[i]->dim(); notSkippedTensor = inputs[i]; } // If all inputs are empty tensors, return an empty tensor if (notSkippedTensor == NULL) { return; } THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs); THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension); size = THLongStorage_newWithSize(nDims); // Compute size of the result in the cat dimension int64_t cat_dim_size = 0; for (int i = 0; i < numInputs; i++) { THCTensor *tensor = inputs[i]; if (should_skip(tensor)) { continue; } THCTensor_(check_shape_except_dim)(state, notSkippedTensor, tensor, dimension); cat_dim_size += THCTensor_(size)(state, tensor, dimension); } // Compute the size of the result for (int dim = 0; dim < nDims; dim++) { int64_t result_dim_size = THCTensor_(size)(state, notSkippedTensor, dim); if (dim == dimension) { result_dim_size = cat_dim_size; } THLongStorage_data(size)[dim] = result_dim_size; } THCTensor_(resize)(state, result, size, NULL); THLongStorage_free(size); // We parallelize the copy if all 6 conditions pass: // // 1. There is more than one input tensor // 2. No empty inputs // 3. The result tensor is 32-bit indexable // 4. The number of dimensions is <= 4 // 5. All input tensors are contiguous (output tensor may be non-contig) // 6. All input tensors can use 32-bit indexing // 7. All input tensors are on the same device if (numInputs > 1 && !hasSkippedInput && result->dim() <= CAT_ARRAY_MAX_INPUT_DIMS && THCTensor_canUse32BitIndexMath(state, result) && THCTensor_allContiguous(state, inputs, numInputs) && THCTensor_all32BitIndexable(state, inputs, numInputs) && THCTensor_allSameDevice(state, inputs, numInputs)) { // First, let's set up our kernel parameters. We start with a raw pointer to the storage // for the output Tensor. real *data = THCTensor_(data)(state, result); // Kernel Parameter size_t tensorMetadataSize = sizeof(CatArrInputTensor<real, unsigned int>) * CAT_ARRAY_BATCH_SIZE; CatArrInputTensor<real, unsigned int> *d_inputs; THCudaCheck(THCudaMalloc(state, (void**) &d_inputs, tensorMetadataSize)); OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param; // Next, let's initialize the size, stride arrays for the output Tensor. 
for (i = 0; i < nDims; ++i) { param.outputSize[i] = THCTensor_(size)(state, result, i); param.outputStride[i] = THCTensor_(stride)(state, result, i); } THCStream* stream = THCState_getStream(state); // Template Declarations for dim = 1, 2, 3, 4 #define HANDLE_CASE(DIMS) \ hipLaunchKernelGGL(( CatArrayBatchedCopy<real, unsigned int, DIMS>), dim3(catGrid), dim3(applyBlock), 0, stream->stream, data, d_inputs, param, dimension, param.outputStride[dimension]); // Now we loop offset = 0; for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) { // Re-allocate stackInputs every iteration to avoid read-after-write hazard CatArrInputTensor<real, unsigned int>* stackInputs = (CatArrInputTensor<real, unsigned int>*) THCudaHostAlloc(state, tensorMetadataSize); cohortMax = 0; for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) { int64_t dimSize = THCTensor_(size)(state, inputs[i+j], dimension); stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]); stackInputs[j].offset = offset; stackInputs[j].dimSize = dimSize; stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]); cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements; // update offset offset += dimSize; } THCudaCheck(hipMemcpyAsync( d_inputs, stackInputs, j * sizeof(CatArrInputTensor<real, unsigned int>), hipMemcpyHostToDevice, stream->stream)); THCudaHostRecord(state, stackInputs); THCudaHostFree(state, stackInputs); // Next, let's consider how we set our kernel launch parameters. // We borrow from THCApply, which the kernel's internal indexing // is based on. dim3 applyBlock = getApplyBlock(); //Get grid where x dim fills half gpu and y dim is number of tensors. //This will have cating two tensors fill the entire grid, but prevent //many threads from needlessly load meta data if their sizes is small. 
dim3 catGrid; getCatGrid(state, j, catGrid); switch (nDims) { case 1: HANDLE_CASE(1); break; case 2: HANDLE_CASE(2); break; case 3: HANDLE_CASE(3); break; case 4: HANDLE_CASE(4); break; } THCudaCheck(hipGetLastError()); } THCudaCheck(THCudaFree(state, d_inputs)); #undef HANDLE_CASE } else { offset = 0; for (j = 0; j < numInputs; j++) { if (should_skip(inputs[j])) continue; int64_t dimSize = THCTensor_(size)(state, inputs[j], dimension); THCTensor *nt = THCTensor_(newWithTensor)(state, result); THCTensor_(narrow)(state, nt, NULL, dimension, offset, dimSize); THCTensor_(copy)(state, nt, inputs[j]); THCTensor_(free)(state, nt); offset += dimSize; } } } void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self )); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor)); using namespace thrust::placeholders; THCThrustAllocator thrustAlloc(state); self = THCTensor_(newContiguous)(state, self); thrust::device_ptr<real> self_data(THCTensor_(data)(state, self)); int num_dim = THCTensor_(_nDimension)(state, self); int64_t N = THCTensor_(nElement)(state, self); THCudaLongTensor_resize2d(state, tensor, N, num_dim); tensor = THCudaLongTensor_newContiguous(state, tensor); thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor)); thrust::counting_iterator<int64_t> idxfirst(0); thrust::counting_iterator<int64_t> idxlast = idxfirst + N; typedef thrust::device_ptr<int64_t> Iter; strided_range<Iter> strided_tensor(tensor_data, tensor_data+N*num_dim, num_dim); #if TORCH_HIP_VERSION >= 7000 hipStream_t stream = THCState_getCurrentStream(state); #endif strided_range<Iter>::iterator dend = thrust::copy_if( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(stream), #endif idxfirst, idxlast, self_data, strided_tensor.begin(), NonZeroOp<real>() ); int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend); int64_t div = 1; for (int dim = num_dim-1; dim >= 0; dim--) { strided_range<Iter> stride_dim(tensor_data+dim, tensor_data+N*num_dim, num_dim); thrust::transform( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(stream), #endif strided_tensor.begin(), strided_tensor.end(), stride_dim.begin(), idx_functor(div, self->size[dim]) ); div *= self->size[dim]; } THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim); THCTensor_(free)(state, self); THCudaLongTensor_free(state, tensor); THCudaCheck(hipGetLastError()); } void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_)); int nDimension = THCTensor_(_nDimension)(state, src_); THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector"); if (nDimension == 2) { int64_t stride0 = THCTensor_(stride)(state, src_, 0); int64_t stride1 = THCTensor_(stride)(state, src_, 1); int64_t size0 = THCTensor_(size)(state, src_, 0); int64_t size1 = THCTensor_(size)(state, src_, 1); int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1); THCTensor_(resize1d)(state, self_, size); int64_t strideSelf = THCTensor_(stride)(state, self_, 0); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x))); int64_t start = (k >= 0 ? 
k * stride1 : -k * stride0); hipLaunchKernelGGL(( THCTensor_copyFromDiagonal<real>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf); } else { ptrdiff_t totalElements = THCTensor_(nElement)(state, src_); ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k; int64_t strideSrc = THCTensor_(stride)(state, src_, 0); THCTensor_(resize2d)(state, self_, size, size); THCTensor_(zero)(state, self_); int64_t stride0 = THCTensor_(stride)(state, self_, 0); int64_t stride1 = THCTensor_(stride)(state, self_, 1); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x))); ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0); hipLaunchKernelGGL(( THCTensor_copyToDiagonal<real>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc); } THCudaCheck(hipGetLastError()); } void THCTensor_(eye)(THCState *state, THCTensor *self_, int64_t n, int64_t m) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); THArgCheck(n > 0, 1, "invalid argument"); if(m <= 0) m = n; THCTensor_(resize2d)(state, self_, n, m); THCTensor_(zero)(state, self_); int64_t sz = THMin(n, m); int64_t stride = THCTensor_(stride)(state, self_, 0) + THCTensor_(stride)(state, self_, 1); THCTensor *diag = THCTensor_(newWithStorage1d)(state, self_->storage, self_->storageOffset, sz, stride); THCTensor_(fill)(state, diag, ScalarConvert<int, real>::to(1)); THCTensor_(free)(state, diag); } accreal THCTensor_(trace)(THCState *state, THCTensor *src_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_)); THArgCheck((src_->_dim() == 2), 1, "expected a matrix"); THCTensor *diag = THCTensor_(new)(state); THCTensor_(diag)(state, diag, src_, 0); accreal trace = THCTensor_(sumall)(state, diag); THCTensor_(free)(state, diag); return trace; } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(linspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n); if (n == 1) THCTensor_(fill)(state, r_, a); else { THCTensor *r = THCTensor_(isContiguous)(state, r_) ? r_ // if r_ is contiguous we can direct work on it : THCTensor_(newContiguous)(state, r_); real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a), ScalarConvert<int64_t,real>::to(n - 1)); LinspaceOp<real> linspace_method(a, step); thrust::device_ptr<real> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + n, linspace_method); if (!THCTensor_(isContiguous)(state, r_)) { // We need to move data back to r_ THCTensor_(freeCopyTo)(state, r, r_); } } THCudaCheck(hipGetLastError()); } void THCTensor_(logspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n); if (n == 1) THCTensor_(fill)(state, r_, THCNumerics<real>::exp10(a)); else { THCTensor *r = THCTensor_(isContiguous)(state, r_) ? 
r_ : THCTensor_(newContiguous)(state, r_); real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a), ScalarConvert<int64_t,real>::to(n - 1)); LogspaceOp<real> logspace_method(a, step); thrust::device_ptr<real> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + n, logspace_method); if (!THCTensor_(isContiguous)(state, r_)) { THCTensor_(freeCopyTo)(state, r, r_); } } THCudaCheck(hipGetLastError()); } #endif void THCTensor_(range)(THCState *state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(step > 0 || step < 0, 3, "step must be nonzero"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound inconsistent with step sign"); ptrdiff_t size = (ptrdiff_t) (((xmax - xmin) / step) + 1); if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size); THCTensor *r = THCTensor_(newContiguous)(state, r_); LinspaceOp<real,accreal> linspace_method(xmin, step); thrust::device_ptr<real> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + size, linspace_method); THCTensor_(freeCopyTo)(state, r, r_); THCudaCheck(hipGetLastError()); } void THCTensor_(arange)(THCState* state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(step > 0 || step < 0, 3, "step must be nonzero"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound inconsistent with step sign"); ptrdiff_t size = (ptrdiff_t) ceil(ScalarConvert<accreal, double>::to(xmax - xmin) / step); if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size); THCTensor *r = THCTensor_(newContiguous)(state, r_); LinspaceOp<real,accreal> linspace_method(xmin, step); thrust::device_ptr<real> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + size, linspace_method); THCTensor_(freeCopyTo)(state, r, r_); THCudaCheck(hipGetLastError()); } #endif
16f88d141e3a4dad3d8525572b6773ee6eebfb9f.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMath.cu" #else THC_API void THCTensor_(fill)(THCState* state, THCTensor *self_, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (!THC_pointwiseApply1<real>( state, self_, TensorFillOp<real>(value))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(zero)(THCState *state, THCTensor *self_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (THCTensor_(isContiguous)(state, self_)) { THCudaCheck(cudaMemsetAsync(THCTensor_(data)(state, self_), 0, sizeof(real) * THCTensor_(nElement)(state, self_), THCState_getCurrentStream(state))); } else { if (!THC_pointwiseApply1<real>( state, self_, TensorFillOp<real>(ScalarConvert<int, real>::to(0)))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor *input) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input)); THCTensor_(resizeAs)(state, r_, input); THCTensor_(zero)(state, r_); } THC_API void THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor *input) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input)); THCTensor_(resizeAs)(state, r_, input); THCTensor_(fill)(state, r_, ScalarConvert<int, real>::to(1)); } ptrdiff_t THCTensor_(numel)(THCState *state, THCTensor *t) { return THCTensor_(nElement)(state, t); } void THCTensor_(cat)(THCState *state, THCTensor *result, THCTensor *ta, THCTensor *tb, int dimension) { THCTensor* inputs[2]; inputs[0] = ta; inputs[1] = tb; THCTensor_(catArray)(state, result, inputs, 2, dimension); } void THCTensor_(check_shape_except_dim)(THCState *state, THCTensor *first, THCTensor *second, int dimension); inline void THCTensor_(check_shape_except_dim)(THCState *state, THCTensor *first, THCTensor *second, int dimension) { int first_dims = first->dim(); int second_dims = second->dim(); THArgCheck(first_dims == second_dims, 0, "Tensors must have same number of dimensions: got %d and %d", first_dims, second_dims); for (int dim = 0; dim < first_dims; dim++) { if (dim == dimension) { continue; } int64_t first_dim_size = THCTensor_(size)(state, first, dim); int64_t second_dim_size = THCTensor_(size)(state, second, dim); THArgCheck(first_dim_size == second_dim_size, 0, "Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d", dimension, (long long)first_dim_size, (long long)second_dim_size, dim); } } void THCTensor_(catArray)(THCState *state, THCTensor *result, THCTensor **inputs, int numInputs, int dimension) { // previously, size [0] tensors were the only possible empty tensors; thus, it wasn't possible // to cat empty tensors unless all the other tensors were 1-dimensional, so we allowed these tensors // to be "skipped". We maintain this behavior for backwards compatibility, but only for this specific // size (i.e. other empty sizes are not skipped). 
// FIXME: warn if this is the case THLongStorage *size; int i, j, cohortMax; int64_t offset; bool hasSkippedInput = false; THCTensor *notSkippedTensor = NULL; // non-owning reference auto should_skip = [](THCTensor *t) { return t->is_empty() && t->dim() == 1; }; int nDims = 0; for (i = 0; i < numInputs; i++) { if (should_skip(inputs[i])) { hasSkippedInput = true; continue; } nDims = inputs[i]->dim(); notSkippedTensor = inputs[i]; } // If all inputs are empty tensors, return an empty tensor if (notSkippedTensor == NULL) { return; } THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs); THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension); size = THLongStorage_newWithSize(nDims); // Compute size of the result in the cat dimension int64_t cat_dim_size = 0; for (int i = 0; i < numInputs; i++) { THCTensor *tensor = inputs[i]; if (should_skip(tensor)) { continue; } THCTensor_(check_shape_except_dim)(state, notSkippedTensor, tensor, dimension); cat_dim_size += THCTensor_(size)(state, tensor, dimension); } // Compute the size of the result for (int dim = 0; dim < nDims; dim++) { int64_t result_dim_size = THCTensor_(size)(state, notSkippedTensor, dim); if (dim == dimension) { result_dim_size = cat_dim_size; } THLongStorage_data(size)[dim] = result_dim_size; } THCTensor_(resize)(state, result, size, NULL); THLongStorage_free(size); // We parallelize the copy if all 6 conditions pass: // // 1. There is more than one input tensor // 2. No empty inputs // 3. The result tensor is 32-bit indexable // 4. The number of dimensions is <= 4 // 5. All input tensors are contiguous (output tensor may be non-contig) // 6. All input tensors can use 32-bit indexing // 7. All input tensors are on the same device if (numInputs > 1 && !hasSkippedInput && result->dim() <= CAT_ARRAY_MAX_INPUT_DIMS && THCTensor_canUse32BitIndexMath(state, result) && THCTensor_allContiguous(state, inputs, numInputs) && THCTensor_all32BitIndexable(state, inputs, numInputs) && THCTensor_allSameDevice(state, inputs, numInputs)) { // First, let's set up our kernel parameters. We start with a raw pointer to the storage // for the output Tensor. real *data = THCTensor_(data)(state, result); // Kernel Parameter size_t tensorMetadataSize = sizeof(CatArrInputTensor<real, unsigned int>) * CAT_ARRAY_BATCH_SIZE; CatArrInputTensor<real, unsigned int> *d_inputs; THCudaCheck(THCudaMalloc(state, (void**) &d_inputs, tensorMetadataSize)); OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param; // Next, let's initialize the size, stride arrays for the output Tensor. 
for (i = 0; i < nDims; ++i) { param.outputSize[i] = THCTensor_(size)(state, result, i); param.outputStride[i] = THCTensor_(stride)(state, result, i); } THCStream* stream = THCState_getStream(state); // Template Declarations for dim = 1, 2, 3, 4 #define HANDLE_CASE(DIMS) \ CatArrayBatchedCopy<real, unsigned int, DIMS><<<catGrid, applyBlock, 0, stream->stream>>>(data, d_inputs, param, dimension, param.outputStride[dimension]); // Now we loop offset = 0; for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) { // Re-allocate stackInputs every iteration to avoid read-after-write hazard CatArrInputTensor<real, unsigned int>* stackInputs = (CatArrInputTensor<real, unsigned int>*) THCudaHostAlloc(state, tensorMetadataSize); cohortMax = 0; for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) { int64_t dimSize = THCTensor_(size)(state, inputs[i+j], dimension); stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]); stackInputs[j].offset = offset; stackInputs[j].dimSize = dimSize; stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]); cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements; // update offset offset += dimSize; } THCudaCheck(cudaMemcpyAsync( d_inputs, stackInputs, j * sizeof(CatArrInputTensor<real, unsigned int>), cudaMemcpyHostToDevice, stream->stream)); THCudaHostRecord(state, stackInputs); THCudaHostFree(state, stackInputs); // Next, let's consider how we set our kernel launch parameters. // We borrow from THCApply, which the kernel's internal indexing // is based on. dim3 applyBlock = getApplyBlock(); //Get grid where x dim fills half gpu and y dim is number of tensors. //This will have cating two tensors fill the entire grid, but prevent //many threads from needlessly load meta data if their sizes is small. 
dim3 catGrid; getCatGrid(state, j, catGrid); switch (nDims) { case 1: HANDLE_CASE(1); break; case 2: HANDLE_CASE(2); break; case 3: HANDLE_CASE(3); break; case 4: HANDLE_CASE(4); break; } THCudaCheck(cudaGetLastError()); } THCudaCheck(THCudaFree(state, d_inputs)); #undef HANDLE_CASE } else { offset = 0; for (j = 0; j < numInputs; j++) { if (should_skip(inputs[j])) continue; int64_t dimSize = THCTensor_(size)(state, inputs[j], dimension); THCTensor *nt = THCTensor_(newWithTensor)(state, result); THCTensor_(narrow)(state, nt, NULL, dimension, offset, dimSize); THCTensor_(copy)(state, nt, inputs[j]); THCTensor_(free)(state, nt); offset += dimSize; } } } void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self )); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor)); using namespace thrust::placeholders; THCThrustAllocator thrustAlloc(state); self = THCTensor_(newContiguous)(state, self); thrust::device_ptr<real> self_data(THCTensor_(data)(state, self)); int num_dim = THCTensor_(_nDimension)(state, self); int64_t N = THCTensor_(nElement)(state, self); THCudaLongTensor_resize2d(state, tensor, N, num_dim); tensor = THCudaLongTensor_newContiguous(state, tensor); thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor)); thrust::counting_iterator<int64_t> idxfirst(0); thrust::counting_iterator<int64_t> idxlast = idxfirst + N; typedef thrust::device_ptr<int64_t> Iter; strided_range<Iter> strided_tensor(tensor_data, tensor_data+N*num_dim, num_dim); #if CUDA_VERSION >= 7000 cudaStream_t stream = THCState_getCurrentStream(state); #endif strided_range<Iter>::iterator dend = thrust::copy_if( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(stream), #endif idxfirst, idxlast, self_data, strided_tensor.begin(), NonZeroOp<real>() ); int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend); int64_t div = 1; for (int dim = num_dim-1; dim >= 0; dim--) { strided_range<Iter> stride_dim(tensor_data+dim, tensor_data+N*num_dim, num_dim); thrust::transform( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(stream), #endif strided_tensor.begin(), strided_tensor.end(), stride_dim.begin(), idx_functor(div, self->size[dim]) ); div *= self->size[dim]; } THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim); THCTensor_(free)(state, self); THCudaLongTensor_free(state, tensor); THCudaCheck(cudaGetLastError()); } void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_)); int nDimension = THCTensor_(_nDimension)(state, src_); THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector"); if (nDimension == 2) { int64_t stride0 = THCTensor_(stride)(state, src_, 0); int64_t stride1 = THCTensor_(stride)(state, src_, 1); int64_t size0 = THCTensor_(size)(state, src_, 0); int64_t size1 = THCTensor_(size)(state, src_, 1); int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1); THCTensor_(resize1d)(state, self_, size); int64_t strideSelf = THCTensor_(stride)(state, self_, 0); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x))); int64_t start = (k >= 0 ? 
k * stride1 : -k * stride0); THCTensor_copyFromDiagonal<real><<<grid, threads, 0, THCState_getCurrentStream(state)>>> (THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf); } else { ptrdiff_t totalElements = THCTensor_(nElement)(state, src_); ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k; int64_t strideSrc = THCTensor_(stride)(state, src_, 0); THCTensor_(resize2d)(state, self_, size, size); THCTensor_(zero)(state, self_); int64_t stride0 = THCTensor_(stride)(state, self_, 0); int64_t stride1 = THCTensor_(stride)(state, self_, 1); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x))); ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0); THCTensor_copyToDiagonal<real><<<grid, threads, 0, THCState_getCurrentStream(state)>>> (THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc); } THCudaCheck(cudaGetLastError()); } void THCTensor_(eye)(THCState *state, THCTensor *self_, int64_t n, int64_t m) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); THArgCheck(n > 0, 1, "invalid argument"); if(m <= 0) m = n; THCTensor_(resize2d)(state, self_, n, m); THCTensor_(zero)(state, self_); int64_t sz = THMin(n, m); int64_t stride = THCTensor_(stride)(state, self_, 0) + THCTensor_(stride)(state, self_, 1); THCTensor *diag = THCTensor_(newWithStorage1d)(state, self_->storage, self_->storageOffset, sz, stride); THCTensor_(fill)(state, diag, ScalarConvert<int, real>::to(1)); THCTensor_(free)(state, diag); } accreal THCTensor_(trace)(THCState *state, THCTensor *src_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_)); THArgCheck((src_->_dim() == 2), 1, "expected a matrix"); THCTensor *diag = THCTensor_(new)(state); THCTensor_(diag)(state, diag, src_, 0); accreal trace = THCTensor_(sumall)(state, diag); THCTensor_(free)(state, diag); return trace; } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(linspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n); if (n == 1) THCTensor_(fill)(state, r_, a); else { THCTensor *r = THCTensor_(isContiguous)(state, r_) ? r_ // if r_ is contiguous we can direct work on it : THCTensor_(newContiguous)(state, r_); real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a), ScalarConvert<int64_t,real>::to(n - 1)); LinspaceOp<real> linspace_method(a, step); thrust::device_ptr<real> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + n, linspace_method); if (!THCTensor_(isContiguous)(state, r_)) { // We need to move data back to r_ THCTensor_(freeCopyTo)(state, r, r_); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(logspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n); if (n == 1) THCTensor_(fill)(state, r_, THCNumerics<real>::exp10(a)); else { THCTensor *r = THCTensor_(isContiguous)(state, r_) ? 
r_ : THCTensor_(newContiguous)(state, r_); real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a), ScalarConvert<int64_t,real>::to(n - 1)); LogspaceOp<real> logspace_method(a, step); thrust::device_ptr<real> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + n, logspace_method); if (!THCTensor_(isContiguous)(state, r_)) { THCTensor_(freeCopyTo)(state, r, r_); } } THCudaCheck(cudaGetLastError()); } #endif void THCTensor_(range)(THCState *state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(step > 0 || step < 0, 3, "step must be nonzero"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound inconsistent with step sign"); ptrdiff_t size = (ptrdiff_t) (((xmax - xmin) / step) + 1); if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size); THCTensor *r = THCTensor_(newContiguous)(state, r_); LinspaceOp<real,accreal> linspace_method(xmin, step); thrust::device_ptr<real> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + size, linspace_method); THCTensor_(freeCopyTo)(state, r, r_); THCudaCheck(cudaGetLastError()); } void THCTensor_(arange)(THCState* state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(step > 0 || step < 0, 3, "step must be nonzero"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound inconsistent with step sign"); ptrdiff_t size = (ptrdiff_t) ceil(ScalarConvert<accreal, double>::to(xmax - xmin) / step); if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size); THCTensor *r = THCTensor_(newContiguous)(state, r_); LinspaceOp<real,accreal> linspace_method(xmin, step); thrust::device_ptr<real> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + size, linspace_method); THCTensor_(freeCopyTo)(state, r, r_); THCudaCheck(cudaGetLastError()); } #endif
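In the nonzero implementation above, each flat element index is turned into per-dimension subscripts by a chain of idx_functor(div, size) transforms, with div accumulating the product of the trailing dimension sizes. A host-side sketch of that decomposition is below; it assumes idx_functor computes (index / div) % size, which is what the accumulation of div in the dim loop implies, and the 2 x 3 x 4 shape and the index 17 are arbitrary examples.

#include <cstdio>

int main() {
    const int sizes[3] = {2, 3, 4};  // row-major tensor shape
    long flat = 17;                  // arbitrary flat element index
    long coord[3];
    long div = 1;
    for (int d = 2; d >= 0; --d) {   // innermost dimension first, mirroring the dim loop above
        coord[d] = (flat / div) % sizes[d];
        div *= sizes[d];
    }
    // Prints: flat 17 -> (1, 1, 1), since 1*3*4 + 1*4 + 1 = 17
    printf("flat %ld -> (%ld, %ld, %ld)\n", flat, coord[0], coord[1], coord[2]);
    return 0;
}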
497ffdc8f63b5076d4e5baa747fa8c1e6bf0bad5.hip
// !!! This is a file automatically generated by hipify!!! // Includes #include <stdio.h> #include <stdlib.h> // includes CUDA #include <hip/hip_runtime.h> // includes, project #include "../include/REPEATW.h" #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 #define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS #define LINE_SIZE 12 // Variables int* h_A; int* h_B; int* h_C; int* d_A; int* d_B; int* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(int*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){ if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal(int* A, int* C, int iterations){ int tid = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation int sum=0; // Fill the L1 cache, Miss on every iteration for (int i=0; i<iterations ; i++){ //REPLACE_ITERATIONS REPEAT_L6(0); } C[0]=sum; __syncthreads(); } // Host code int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = (400*max_tid*LINE_SIZE); size_t size = N * sizeof(int) ; // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); if (h_A == 0) CleanupResources(); // h_B = (int*)malloc(size); // if (h_B == 0) CleanupResources(); h_C = (int*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); // RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( hipMalloc((void**)&d_A, size) ); // checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); hipEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); // checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); checkCudaErrors(hipEventRecord(start)); hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, iterations); checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); hipDeviceSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( 
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void){ // Free device memory if (d_A) hipFree(d_A); //if (d_B) // hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); // if (h_B) // free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(int* data, int n){ for (int i = 0; i < n; ++i) data[i] = (int)(rand() / RAND_MAX); }
497ffdc8f63b5076d4e5baa747fa8c1e6bf0bad5.cu
// Includes #include <stdio.h> #include <stdlib.h> // includes CUDA #include <cuda_runtime.h> // includes, project #include "../include/REPEATW.h" #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 #define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS #define LINE_SIZE 12 // Variables int* h_A; int* h_B; int* h_C; int* d_A; int* d_B; int* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(int*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ){ if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal(int* A, int* C, int iterations){ int tid = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation int sum=0; // Fill the L1 cache, Miss on every iteration for (int i=0; i<iterations ; i++){ //REPLACE_ITERATIONS REPEAT_L6(0); } C[0]=sum; __syncthreads(); } // Host code int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = (400*max_tid*LINE_SIZE); size_t size = N * sizeof(int) ; // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); if (h_A == 0) CleanupResources(); // h_B = (int*)malloc(size); // if (h_B == 0) CleanupResources(); h_C = (int*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); // RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( cudaMalloc((void**)&d_A, size) ); // checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); cudaEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); // checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); checkCudaErrors(cudaEventRecord(start)); PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, iterations); checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); cudaThreadSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); 
checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void){ // Free device memory if (d_A) cudaFree(d_A); //if (d_B) // cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); // if (h_B) // free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(int* data, int n){ for (int i = 0; i < n; ++i) data[i] = (int)(rand() / RAND_MAX); }
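A condensed, self-contained sketch of the cudaEvent timing and error-check scaffolding the microbenchmark above uses; the CHECK macro and dummy_kernel are illustrative placeholders, not part of the benchmark. cudaEventElapsedTime reports milliseconds, which is why the benchmark divides by 1000 before printing seconds.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK(call) do {                                              \
    cudaError_t e_ = (call);                                          \
    if (e_ != cudaSuccess) {                                          \
      std::fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,         \
                   cudaGetErrorString(e_));                           \
      std::exit(1);                                                   \
    }                                                                 \
  } while (0)

__global__ void dummy_kernel(int *out) { out[0] = 42; }   // placeholder work

int main() {
  int *d_out = nullptr;
  CHECK(cudaMalloc(&d_out, sizeof(int)));

  cudaEvent_t start, stop;
  CHECK(cudaEventCreate(&start));
  CHECK(cudaEventCreate(&stop));

  CHECK(cudaEventRecord(start));
  dummy_kernel<<<1, 1>>>(d_out);
  CHECK(cudaGetLastError());            // catches launch-time errors
  CHECK(cudaEventRecord(stop));
  CHECK(cudaEventSynchronize(stop));    // wait for the kernel and the stop event

  float ms = 0.f;
  CHECK(cudaEventElapsedTime(&ms, start, stop));
  std::printf("kernel time = %.3f ms\n", ms);

  CHECK(cudaEventDestroy(start));
  CHECK(cudaEventDestroy(stop));
  CHECK(cudaFree(d_out));
  return 0;
}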
73dcaecb7f8e93784fe500634557eac59746fc26.hip
// !!! This is a file automatically generated by hipify!!! #include "FVL/CFVMat.h" using namespace FVL; #include <vector> #include <iostream> using std::cout; #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> __global__ void kernel_sum(int **X, int **Y, int **R, int w, int h) { unsigned int tidX = blockIdx.x * blockDim.x + threadIdx.x; unsigned int tidY = blockIdx.y * blockDim.y + threadIdx.y; unsigned int tidR = blockIdx.z * blockDim.z + threadIdx.z; int elem = tidY * w + tidX; int* matX = X[elem]; int* matY = Y[elem]; int* matR = R[elem]; matR[tidR] = matX[tidR] + matY[tidR]; } int main() { // array of 5 2x2 matrixes CFVMat<int> matX(2, 2, 5); CFVMat<int> matY(2, 2, 5); CFVMat<int> matR(2, 2, 5); for(unsigned int y = 0; y < 2; ++y) for(unsigned int x = 0; x < 2; ++x) for(unsigned int i = 0; i < 5; ++i) matX.elem(x, y, i) = matY.elem(x, y, i) = i; matX.cuda_mallocAndSave(); matY.cuda_mallocAndSave(); matR.cuda_malloc(); dim3 gridDim = 1; dim3 blockDim(2, 2, 5); hipLaunchKernelGGL(( kernel_sum), dim3(gridDim), dim3(blockDim), 0, 0, matX.cuda_getMat(), matY.cuda_getMat(), matR.cuda_getMat(), matX.width(), matY.height()); matX.cuda_get(); matR.cuda_get(); for(unsigned int y = 0; y < 2; ++y) { for(unsigned int x = 0; x < 2; ++x) { cout << "coord: (" << x << ", " << y << ")"; for(unsigned int i = 0; i < 5; ++i) cout << " " << matX.elem(x, y, i); cout << endl; } } cout << endl; for(unsigned int y = 0; y < 2; ++y) { for(unsigned int x = 0; x < 2; ++x) { cout << "coord: (" << x << ", " << y << ")"; for(unsigned int i = 0; i < 5; ++i) cout << " " << matY.elem(x, y, i); cout << endl; } } cout << endl; for(unsigned int y = 0; y < 2; ++y) { for(unsigned int x = 0; x < 2; ++x) { cout << "coord: (" << x << ", " << y << ")"; for(unsigned int i = 0; i < 5; ++i) cout << " " << matR.elem(x, y, i); cout << endl; } } }
73dcaecb7f8e93784fe500634557eac59746fc26.cu
#include "FVL/CFVMat.h" using namespace FVL; #include <vector> #include <iostream> using std::cout; #include <cuda.h> #include <cuda_runtime_api.h> __global__ void kernel_sum(int **X, int **Y, int **R, int w, int h) { unsigned int tidX = blockIdx.x * blockDim.x + threadIdx.x; unsigned int tidY = blockIdx.y * blockDim.y + threadIdx.y; unsigned int tidR = blockIdx.z * blockDim.z + threadIdx.z; int elem = tidY * w + tidX; int* matX = X[elem]; int* matY = Y[elem]; int* matR = R[elem]; matR[tidR] = matX[tidR] + matY[tidR]; } int main() { // array of 5 2x2 matrixes CFVMat<int> matX(2, 2, 5); CFVMat<int> matY(2, 2, 5); CFVMat<int> matR(2, 2, 5); for(unsigned int y = 0; y < 2; ++y) for(unsigned int x = 0; x < 2; ++x) for(unsigned int i = 0; i < 5; ++i) matX.elem(x, y, i) = matY.elem(x, y, i) = i; matX.cuda_mallocAndSave(); matY.cuda_mallocAndSave(); matR.cuda_malloc(); dim3 gridDim = 1; dim3 blockDim(2, 2, 5); kernel_sum<<<gridDim, blockDim>>>( matX.cuda_getMat(), matY.cuda_getMat(), matR.cuda_getMat(), matX.width(), matY.height()); matX.cuda_get(); matR.cuda_get(); for(unsigned int y = 0; y < 2; ++y) { for(unsigned int x = 0; x < 2; ++x) { cout << "coord: (" << x << ", " << y << ")"; for(unsigned int i = 0; i < 5; ++i) cout << " " << matX.elem(x, y, i); cout << endl; } } cout << endl; for(unsigned int y = 0; y < 2; ++y) { for(unsigned int x = 0; x < 2; ++x) { cout << "coord: (" << x << ", " << y << ")"; for(unsigned int i = 0; i < 5; ++i) cout << " " << matY.elem(x, y, i); cout << endl; } } cout << endl; for(unsigned int y = 0; y < 2; ++y) { for(unsigned int x = 0; x < 2; ++x) { cout << "coord: (" << x << ", " << y << ")"; for(unsigned int i = 0; i < 5; ++i) cout << " " << matR.elem(x, y, i); cout << endl; } } }
sgeqr2_batched.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @author Azzam Haidar @author Tingxing Dong @generated from magmablas/zgeqr2_batched.cu, normal z -> s, Wed Jan 2 14:18:51 2019 */ #include "magma_internal.h" #include "magma_templates.h" #include "batched_kernel_param.h" #define BLOCK_SIZE 256 #define dA(a_1,a_2) (dA + (a_1) + (a_2)*(local_lda)) #include "slarfg_devicesfunc.cuh" /******************************************************************************/ static __device__ void slarfx_device( int m, int n, float *v, float *tau, float *dc, magma_int_t ldc, float* sum) { if (n <= 0) return; if (MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) return; // check singularity const int tx = threadIdx.x; float lsum; for (int k=0; k < n; k++) { /* perform w := v' * C */ if (tx < BLOCK_SIZE) { if (tx == 0) lsum = dc[0+ldc*k]; //since V[0] should be one else lsum = MAGMA_S_ZERO; for (int j = tx+1; j < m; j += BLOCK_SIZE) { lsum += MAGMA_S_MUL( MAGMA_S_CONJ( v[j] ), dc[j+ldc*k] ); } sum[tx] = lsum; } magma_sum_reduce< BLOCK_SIZE >( tx, sum ); __syncthreads(); float z__1 = - MAGMA_S_CONJ(*tau) * sum[0]; /* C := C - v * w */ if (tx < BLOCK_SIZE) { for (int j = tx+1; j < m; j += BLOCK_SIZE) dc[j+ldc*k] += z__1 * v[j]; } if (tx == 0) dc[0+ldc*k] += z__1; __syncthreads(); } } /******************************************************************************/ static __device__ void sgeqr2_device( magma_int_t m, magma_int_t n, float* dA, magma_int_t lda, float *dtau, float *dv, float *sum, float *swork, float *scale, float *sscale) { //lapack slarfg, compute the norm, scale and generate the householder vector slarfg_device(m, dv, &(dv[1]), 1, dtau, swork, sscale, scale); __syncthreads(); //update the trailing matix with the householder slarfx_device(m, n, dv, dtau, dA, lda, sum); __syncthreads(); } /******************************************************************************/ extern __shared__ float shared_data[]; /******************************************************************************/ __global__ void sgeqr2_sm_kernel_batched( int m, int n, float** dA_array, magma_int_t lda, float **dtau_array) { float* dA = dA_array[blockIdx.z]; float* dtau = dtau_array[blockIdx.z]; float *sdata = (float*)shared_data; const int tx = threadIdx.x; __shared__ float scale; __shared__ float sum[ BLOCK_SIZE ]; __shared__ float swork[ BLOCK_SIZE ]; __shared__ float sscale; //load data from global to shared memory for (int s=0; s < n; s++) { for (int j = tx; j < m; j += BLOCK_SIZE) { sdata[j + s * m] = dA[j + s * lda]; } } __syncthreads(); for (int s=0; s < min(m,n); s++) { sgeqr2_device( m-s, n-(s+1), &(sdata[s+(s+1)*m]), m, dtau+s, &(sdata[s+s*m]), sum, swork, &scale, &sscale); } // end of s //copy back to global memory for (int s=0; s < n; s++) { for (int j = tx; j < m; j += BLOCK_SIZE) { dA[j + s * lda] = sdata[j + s * m]; } } } /******************************************************************************/ __global__ void sgeqr2_column_sm_kernel_batched( int m, int n, float** dA_array, magma_int_t lda, float **dtau_array) { float* dA = dA_array[blockIdx.z]; float* dtau = dtau_array[blockIdx.z]; float *sdata = (float*)shared_data; __shared__ float scale; __shared__ float sum[ BLOCK_SIZE ]; __shared__ float swork[ BLOCK_SIZE ]; __shared__ float sscale; const int tx = threadIdx.x; for (int s=0; s < min(m,n); s++) { //load one vector in shared memory: sdata for 
(int j = tx; j < m-s; j += BLOCK_SIZE) { sdata[j] = dA[s + j + s * lda]; } __syncthreads(); //sdata is written sgeqr2_device(m-s, n-(s+1), &(dA[s+(s+1)*lda]), lda, dtau+s, sdata, sum, swork, &scale, &sscale); for (int j = tx; j < m-s; j += BLOCK_SIZE) { dA[s + j + s * lda] = sdata[j]; } __syncthreads(); } } /******************************************************************************/ __global__ void sgeqr2_kernel_batched( int m, int n, float** dA_array, magma_int_t lda, float **dtau_array) { float* dA = dA_array[blockIdx.z]; float* dtau = dtau_array[blockIdx.z]; __shared__ float scale; __shared__ float sum[ BLOCK_SIZE ]; __shared__ float swork[ BLOCK_SIZE ]; __shared__ float sscale; for (int s=0; s < min(m,n); s++) { sgeqr2_device( m-s, n-(s+1), &(dA[s+(s+1)*lda]), lda, dtau+s, &(dA[s+s*lda]), sum, swork, &scale, &sscale ); } } /***************************************************************************//** Purpose ------- SGEQR2 computes a QR factorization of a real m by n matrix A: A = Q * R. This version implements the right-looking QR with non-blocking. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in,out] dA_array Array of pointers, dimension (batchCount). Each is a REAL array on the GPU, dimension (LDDA,N) On entry, the M-by-N matrix A. On exit, the elements on and above the diagonal of the array contain the min(M,N)-by-N upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the orthogonal matrix Q as a product of min(m,n) elementary reflectors (see Further Details). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). To benefit from coalescent memory accesses LDDA must be divisible by 16. @param[out] dtau_array Array of pointers, dimension (batchCount). Each is a REAL array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). @param[out] info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices. - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value or another error occured, such as memory allocation failed. @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. Further Details --------------- The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). 
@ingroup magma_geqr2_batched *******************************************************************************/ extern "C" magma_int_t magma_sgeqr2_batched(magma_int_t m, magma_int_t n, float **dA_array, magma_int_t ldda, float **dtau_array, magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t k; /* Check arguments */ magma_int_t arginfo = 0; if (m < 0) arginfo = -1; else if (n < 0) arginfo = -2; else if (ldda < max(1,m)) arginfo = -4; if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } k = min(m,n); dim3 blocks(1, 1, batchCount); dim3 threads(BLOCK_SIZE); if (sizeof(float)*(m*k) <= 42000 /*sizeof(float) * 128 * k*/) // there are some static shared memory besides of dynamic ones { //load panel in shared memory and factorize it and copy back to gloabl memory //intend for small panel to avoid overfill of shared memory. //this kernel is composed of device routine and thus clean hipLaunchKernelGGL(( sgeqr2_sm_kernel_batched), dim3(blocks), dim3(threads), sizeof(float)*(m*k), queue->cuda_stream() , m, k, dA_array, ldda, dtau_array); } else { //load one column vector in shared memory and householder it and used it to update trailing matrix which is global memory // one vector is normally smaller than 48K shared memory if (sizeof(float)*(m) < 42000) hipLaunchKernelGGL(( sgeqr2_column_sm_kernel_batched), dim3(blocks), dim3(threads), sizeof(float)*(m), queue->cuda_stream() , m, k, dA_array, ldda, dtau_array); else //not use dynamic shared memory at all hipLaunchKernelGGL(( sgeqr2_kernel_batched), dim3(blocks), dim3(threads), 0, queue->cuda_stream() , m, k, dA_array, ldda, dtau_array); } return arginfo; }
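The "Further Details" block above describes each elementary reflector as H(i) = I - tau * v * v'. A small host-side sketch (not MAGMA code) of applying one such reflector to a vector makes the algebra concrete: H*x = x - tau * (v' * x) * v, which is the same update slarfx_device performs column by column on the trailing matrix, with the dot product reduced across the thread block.

#include <cstdio>
#include <vector>

// Apply H = I - tau * v * v^T to x in place (real case, so v' = v^T).
void apply_householder(const std::vector<float>& v, float tau,
                       std::vector<float>& x) {
  float dot = 0.f;
  for (size_t j = 0; j < v.size(); ++j) dot += v[j] * x[j];     // w = v^T * x
  for (size_t j = 0; j < v.size(); ++j) x[j] -= tau * dot * v[j];
}

int main() {
  // v(1) = 1 by convention; the remaining entries are what is stored below
  // the diagonal of A on exit, as the documentation above notes.
  std::vector<float> v = {1.f, 0.5f, -0.25f};
  std::vector<float> x = {2.f, 1.f, 4.f};
  apply_householder(v, 0.8f, x);                                // illustrative tau
  for (float e : x) std::printf("%g ", e);
  std::printf("\n");
  return 0;
}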
sgeqr2_batched.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @author Azzam Haidar @author Tingxing Dong @generated from magmablas/zgeqr2_batched.cu, normal z -> s, Wed Jan 2 14:18:51 2019 */ #include "magma_internal.h" #include "magma_templates.h" #include "batched_kernel_param.h" #define BLOCK_SIZE 256 #define dA(a_1,a_2) (dA + (a_1) + (a_2)*(local_lda)) #include "slarfg_devicesfunc.cuh" /******************************************************************************/ static __device__ void slarfx_device( int m, int n, float *v, float *tau, float *dc, magma_int_t ldc, float* sum) { if (n <= 0) return; if (MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) return; // check singularity const int tx = threadIdx.x; float lsum; for (int k=0; k < n; k++) { /* perform w := v' * C */ if (tx < BLOCK_SIZE) { if (tx == 0) lsum = dc[0+ldc*k]; //since V[0] should be one else lsum = MAGMA_S_ZERO; for (int j = tx+1; j < m; j += BLOCK_SIZE) { lsum += MAGMA_S_MUL( MAGMA_S_CONJ( v[j] ), dc[j+ldc*k] ); } sum[tx] = lsum; } magma_sum_reduce< BLOCK_SIZE >( tx, sum ); __syncthreads(); float z__1 = - MAGMA_S_CONJ(*tau) * sum[0]; /* C := C - v * w */ if (tx < BLOCK_SIZE) { for (int j = tx+1; j < m; j += BLOCK_SIZE) dc[j+ldc*k] += z__1 * v[j]; } if (tx == 0) dc[0+ldc*k] += z__1; __syncthreads(); } } /******************************************************************************/ static __device__ void sgeqr2_device( magma_int_t m, magma_int_t n, float* dA, magma_int_t lda, float *dtau, float *dv, float *sum, float *swork, float *scale, float *sscale) { //lapack slarfg, compute the norm, scale and generate the householder vector slarfg_device(m, dv, &(dv[1]), 1, dtau, swork, sscale, scale); __syncthreads(); //update the trailing matix with the householder slarfx_device(m, n, dv, dtau, dA, lda, sum); __syncthreads(); } /******************************************************************************/ extern __shared__ float shared_data[]; /******************************************************************************/ __global__ void sgeqr2_sm_kernel_batched( int m, int n, float** dA_array, magma_int_t lda, float **dtau_array) { float* dA = dA_array[blockIdx.z]; float* dtau = dtau_array[blockIdx.z]; float *sdata = (float*)shared_data; const int tx = threadIdx.x; __shared__ float scale; __shared__ float sum[ BLOCK_SIZE ]; __shared__ float swork[ BLOCK_SIZE ]; __shared__ float sscale; //load data from global to shared memory for (int s=0; s < n; s++) { for (int j = tx; j < m; j += BLOCK_SIZE) { sdata[j + s * m] = dA[j + s * lda]; } } __syncthreads(); for (int s=0; s < min(m,n); s++) { sgeqr2_device( m-s, n-(s+1), &(sdata[s+(s+1)*m]), m, dtau+s, &(sdata[s+s*m]), sum, swork, &scale, &sscale); } // end of s //copy back to global memory for (int s=0; s < n; s++) { for (int j = tx; j < m; j += BLOCK_SIZE) { dA[j + s * lda] = sdata[j + s * m]; } } } /******************************************************************************/ __global__ void sgeqr2_column_sm_kernel_batched( int m, int n, float** dA_array, magma_int_t lda, float **dtau_array) { float* dA = dA_array[blockIdx.z]; float* dtau = dtau_array[blockIdx.z]; float *sdata = (float*)shared_data; __shared__ float scale; __shared__ float sum[ BLOCK_SIZE ]; __shared__ float swork[ BLOCK_SIZE ]; __shared__ float sscale; const int tx = threadIdx.x; for (int s=0; s < min(m,n); s++) { //load one vector in shared memory: sdata for (int j = tx; j < m-s; j += BLOCK_SIZE) { sdata[j] = dA[s + j + s * lda]; } 
__syncthreads(); //sdata is written sgeqr2_device(m-s, n-(s+1), &(dA[s+(s+1)*lda]), lda, dtau+s, sdata, sum, swork, &scale, &sscale); for (int j = tx; j < m-s; j += BLOCK_SIZE) { dA[s + j + s * lda] = sdata[j]; } __syncthreads(); } } /******************************************************************************/ __global__ void sgeqr2_kernel_batched( int m, int n, float** dA_array, magma_int_t lda, float **dtau_array) { float* dA = dA_array[blockIdx.z]; float* dtau = dtau_array[blockIdx.z]; __shared__ float scale; __shared__ float sum[ BLOCK_SIZE ]; __shared__ float swork[ BLOCK_SIZE ]; __shared__ float sscale; for (int s=0; s < min(m,n); s++) { sgeqr2_device( m-s, n-(s+1), &(dA[s+(s+1)*lda]), lda, dtau+s, &(dA[s+s*lda]), sum, swork, &scale, &sscale ); } } /***************************************************************************//** Purpose ------- SGEQR2 computes a QR factorization of a real m by n matrix A: A = Q * R. This version implements the right-looking QR with non-blocking. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in,out] dA_array Array of pointers, dimension (batchCount). Each is a REAL array on the GPU, dimension (LDDA,N) On entry, the M-by-N matrix A. On exit, the elements on and above the diagonal of the array contain the min(M,N)-by-N upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the orthogonal matrix Q as a product of min(m,n) elementary reflectors (see Further Details). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). To benefit from coalescent memory accesses LDDA must be divisible by 16. @param[out] dtau_array Array of pointers, dimension (batchCount). Each is a REAL array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). @param[out] info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices. - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value or another error occured, such as memory allocation failed. @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. Further Details --------------- The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). @ingroup magma_geqr2_batched *******************************************************************************/ extern "C" magma_int_t magma_sgeqr2_batched(magma_int_t m, magma_int_t n, float **dA_array, magma_int_t ldda, float **dtau_array, magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t k; /* Check arguments */ magma_int_t arginfo = 0; if (m < 0) arginfo = -1; else if (n < 0) arginfo = -2; else if (ldda < max(1,m)) arginfo = -4; if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } k = min(m,n); dim3 blocks(1, 1, batchCount); dim3 threads(BLOCK_SIZE); if (sizeof(float)*(m*k) <= 42000 /*sizeof(float) * 128 * k*/) // there are some static shared memory besides of dynamic ones { //load panel in shared memory and factorize it and copy back to gloabl memory //intend for small panel to avoid overfill of shared memory. 
//this kernel is composed of device routine and thus clean sgeqr2_sm_kernel_batched<<< blocks, threads, sizeof(float)*(m*k), queue->cuda_stream() >>> (m, k, dA_array, ldda, dtau_array); } else { //load one column vector in shared memory and householder it and used it to update trailing matrix which is global memory // one vector is normally smaller than 48K shared memory if (sizeof(float)*(m) < 42000) sgeqr2_column_sm_kernel_batched<<< blocks, threads, sizeof(float)*(m), queue->cuda_stream() >>> (m, k, dA_array, ldda, dtau_array); else //not use dynamic shared memory at all sgeqr2_kernel_batched<<< blocks, threads, 0, queue->cuda_stream() >>> (m, k, dA_array, ldda, dtau_array); } return arginfo; }
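The dispatch above guards its shared-memory variants with a hard-coded 42000-byte budget, a margin under the traditional 48 KB per-block limit that also leaves room for the kernels' static __shared__ arrays. A small sketch of querying the actual per-block limit at runtime; only the query is illustrated, the threshold policy itself stays MAGMA's.

#include <cstdio>
#include <cuda_runtime.h>

int main() {
  int dev = 0;
  cudaGetDevice(&dev);
  int smem_per_block = 0;
  cudaDeviceGetAttribute(&smem_per_block,
                         cudaDevAttrMaxSharedMemoryPerBlock, dev);
  std::printf("shared memory per block: %d bytes\n", smem_per_block);
  // The m*k-float panel variant is viable only while m * k * sizeof(float)
  // stays comfortably below this figure, minus the static __shared__ usage.
  return 0;
}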
e620ad3e9408ced7043f70c740268c775c6785bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_ENTRYWISE_LAYER_INSTANTIATE #include "lbann/layers/loss/entrywise.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { /** GPU kernel to apply an binary backprop operator. */ template <template <typename> class BinaryBackPropOperator, typename TensorDataType> __global__ void binary_backprop_operator_kernel(El::Int height, El::Int width, const TensorDataType* __restrict__ x1, El::Int x1_ldim, const TensorDataType* __restrict__ x2, El::Int x2_ldim, const TensorDataType* __restrict__ dy, El::Int dy_ldim, TensorDataType* __restrict__ dx1, El::Int dx1_ldim, TensorDataType* __restrict__ dx2, El::Int dx2_ldim) { const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x; const El::Int size = height * width; const El::Int num_threads = blockDim.x * gridDim.x; BinaryBackPropOperator<TensorDataType> op; for (El::Int pos = gid; pos < size; pos += num_threads) { const auto& row = pos % height; const auto& col = pos / height; op(x1[row + col * x1_ldim], x2[row + col * x2_ldim], dy[row + col * dy_ldim], dx1[row + col * dx1_ldim], dx2[row + col * dx2_ldim]); } } /** Apply a binary backprop operator to CPU data. * The input and output data must be on CPU and must have the same * dimensions. Given a binary function \f$ y = f(x_1,x_2) \f$, the * corresponding BinaryBackPropOperator is a 5-ary function with the * arguments \f$ x_1 \f$, \f$ x_2 \f$, \f$ dL/dy \f$, \f$ dL/dx_1\f$, * \f$ dL/dx_2 \f$. The last two arguments should be overwritten when * the BinaryBackPropOperator is called. */ template <template <typename> class Op, typename TensorDataType> void apply_binary_backprop_operator( const El::AbstractMatrix<TensorDataType>& x1, const El::AbstractMatrix<TensorDataType>& x2, const El::AbstractMatrix<TensorDataType>& dy, El::AbstractMatrix<TensorDataType>& dx1, El::AbstractMatrix<TensorDataType>& dx2) { // Get CUDA grid dimensions // Note: Maximum CUDA grid dimension is 2^32-1 // (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications). 
// TODO: HIP/ROCM notes const El::Int height = x1.Height(); const El::Int width = x1.Width(); const El::Int block_dim = 256; El::Int grid_dim = (height * width + block_dim - 1) / block_dim; if (sizeof(El::Int) > sizeof(unsigned int) && grid_dim > std::numeric_limits<uint32_t>::max()) { grid_dim = std::numeric_limits<uint32_t>::max(); } //Launch GPU kernel if (grid_dim > 0) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(dx2), gpu::get_sync_info(dx1), gpu::get_sync_info(dy), gpu::get_sync_info(x2), gpu::get_sync_info(x1)); hydrogen::gpu::LaunchKernel( binary_backprop_operator_kernel<Op, TensorDataType>, grid_dim, block_dim, 0, multisync, height, width, x1.LockedBuffer(), x1.LDim(), x2.LockedBuffer(), x2.LDim(), dy.LockedBuffer(), dy.LDim(), dx1.Buffer(), dx1.LDim(), dx2.Buffer(), dx2.LDim()); } } // ========================================================= // Operator objects for entry-wise binary layers // ========================================================= // Note: Binary operator corresponds to forward prop step // (\f$ y = f(x_1,x_2) \f$) and 5-ary operator corresponds // to back prop step // (\f$ \frac{dL}{dx_i} = \frac{dL}{dy} \frac{df}{dx_i}(x_1,x_2) \f$). /** Binary cross entropy operator. */ template <typename TensorDataType> struct binary_cross_entropy_op { inline __device__ TensorDataType operator()(const TensorDataType& x1, const TensorDataType& x2) const { const TensorDataType zero = 0.; const TensorDataType one = 1.; TensorDataType y = zero; if (x2 > zero) { y += -x2 * gpu_lib::log(x1); } if (x2 < one) { y += -(one-x2) * gpu_lib::log(one-x1); } return y; } inline __device__ void operator()(const TensorDataType& x1, const TensorDataType& x2, const TensorDataType& dy, TensorDataType& dx1, TensorDataType& dx2) const { const TensorDataType zero = 0.; const TensorDataType one = 1.; dx1 = zero; dx2 = zero; if (dy == zero) { return; } if (x2 > zero) { dx1 += -x2 / x1 * dy; dx2 += -gpu_lib::log(x1) * dy; } if (x2 < one) { dx1 += (one-x2) / (one-x1) * dy; dx2 += gpu_lib::log(one-x1) * dy; } } }; /** Sigmoid binary cross entropy operator. * Equivalent to applying a sigmoid function to the first operand and * then computing the binary cross entropy. Numerically stable * implementation is taken from * https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits. */ template <typename TensorDataType> struct sigmoid_binary_cross_entropy_op { inline __device__ TensorDataType operator()(const TensorDataType& x1, const TensorDataType& x2) const { const TensorDataType zero = 0.; const TensorDataType one = 1.; const auto& z = gpu_lib::max(zero, gpu_lib::min(x2, one)); if (x1 > zero) { return (one - z) * x1 + gpu_lib::log1p(gpu_lib::exp(-x1)); } else { return - x1 * z + gpu_lib::log1p(gpu_lib::exp(x1)); } } inline __device__ void operator()(const TensorDataType& x1, const TensorDataType& x2, const TensorDataType& dy, TensorDataType& dx1, TensorDataType& dx2) const { const TensorDataType zero = 0.; const TensorDataType one = 1.; const auto& z = gpu_lib::max(zero, gpu_lib::min(x2, one)); if (x1 > zero) { dx1 = -z + one / (one + gpu_lib::exp(-x1)); } else { dx1 = one - z - one / (one + gpu_lib::exp(x1)); } dx1 *= dy; dx2 = (x2 == z) ? -x1 * dy : zero; } }; /** Boolean accuracy operator. */ template <typename TensorDataType> struct boolean_accuracy_op { inline __device__ TensorDataType operator()(const TensorDataType& x1, const TensorDataType& x2) const { const auto& b1 = x1 >= TensorDataType(0.5); const auto& b2 = x2 >= TensorDataType(0.5); return b1 == b2 ? 
TensorDataType(1.0) : TensorDataType(0.0); } inline __device__ void operator()(const TensorDataType& x1, const TensorDataType& x2, const TensorDataType& dy, TensorDataType& dx1, TensorDataType& dx2) const { dx1 = TensorDataType(0.0); dx2 = TensorDataType(0.0); } }; /** Boolean false negative operator. */ template <typename TensorDataType> struct boolean_false_negative_op { inline __device__ TensorDataType operator()(const TensorDataType& x1, const TensorDataType& x2) const { const auto& b1 = x1 >= TensorDataType(0.5); const auto& b2 = x2 >= TensorDataType(0.5); return (!b1 && b2) ? TensorDataType(1.0) : TensorDataType(0.0); } inline __device__ void operator()(const TensorDataType& x1, const TensorDataType& x2, const TensorDataType& dy, TensorDataType& dx1, TensorDataType& dx2) const { dx1 = TensorDataType(0.0); dx2 = TensorDataType(0.0); } }; /** Boolean false positive operator. */ template <typename TensorDataType> struct boolean_false_positive_op { inline __device__ TensorDataType operator()(const TensorDataType& x1, const TensorDataType& x2) const { const auto& b1 = x1 >= TensorDataType(0.5); const auto& b2 = x2 >= TensorDataType(0.5); return (b1 && !b2) ? TensorDataType(1.0) : TensorDataType(0.0); } inline __device__ void operator()(const TensorDataType& x1, const TensorDataType& x2, const TensorDataType& dy, TensorDataType& dx1, TensorDataType& dx2) const { dx1 = TensorDataType(0.0); dx2 = TensorDataType(0.0); } }; } // namespace // Template instantiation #define DEFINE_COMPUTE_OPS(layer, op) \ template <typename TensorDataType, data_layout Layout, El::Device Device> \ void layer<TensorDataType, Layout, Device>::fp_compute() { \ gpu_lib::apply_entrywise_binary_operator<op>( \ this->get_prev_activations(0), \ this->get_prev_activations(1), \ this->get_activations()); \ } \ template <typename TensorDataType, data_layout Layout, El::Device Device> \ void layer<TensorDataType, Layout, Device>::bp_compute() { \ apply_binary_backprop_operator<op>( \ this->get_local_prev_activations(0), \ this->get_local_prev_activations(1), \ this->get_local_prev_error_signals(), \ this->get_local_error_signals(0), \ this->get_local_error_signals(1)); \ } \ DEFINE_COMPUTE_OPS(binary_cross_entropy_layer, binary_cross_entropy_op) DEFINE_COMPUTE_OPS(sigmoid_binary_cross_entropy_layer, sigmoid_binary_cross_entropy_op) DEFINE_COMPUTE_OPS(boolean_accuracy_layer, boolean_accuracy_op) DEFINE_COMPUTE_OPS(boolean_false_negative_layer, boolean_false_negative_op) DEFINE_COMPUTE_OPS(boolean_false_positive_layer, boolean_false_positive_op) #define PROTO(T) \ BINARY_ETI_INST_MACRO_DEV_DT(binary_cross_entropy_layer, T, El::Device::GPU); \ BINARY_ETI_INST_MACRO_DEV_DT(sigmoid_binary_cross_entropy_layer, T, El::Device::GPU); \ BINARY_ETI_INST_MACRO_DEV_DT(boolean_accuracy_layer, T, El::Device::GPU); \ BINARY_ETI_INST_MACRO_DEV_DT(boolean_false_negative_layer, T, El::Device::GPU); \ BINARY_ETI_INST_MACRO_DEV_DT(boolean_false_positive_layer, T, El::Device::GPU) #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
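binary_backprop_operator_kernel above walks a height*width buffer with a grid-stride loop, so the launch can clamp grid_dim (to stay under the 2^32-1 grid limit noted in its comments) and still cover arbitrarily large tensors. A minimal standalone sketch of that pattern with a hypothetical scale kernel:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale_kernel(const float *x, float *y, float alpha, long long n) {
  const long long gid = blockIdx.x * (long long)blockDim.x + threadIdx.x;
  const long long stride = (long long)blockDim.x * gridDim.x;
  for (long long i = gid; i < n; i += stride)   // grid-stride loop
    y[i] = alpha * x[i];
}

int main() {
  const long long n = 1 << 20;
  float *d_x = nullptr, *d_y = nullptr;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMalloc(&d_y, n * sizeof(float));
  cudaMemset(d_x, 0, n * sizeof(float));

  const int block = 256;
  const int grid = 1024;   // deliberately fewer blocks than ceil(n/block)
  scale_kernel<<<grid, block>>>(d_x, d_y, 2.0f, n);
  cudaDeviceSynchronize();
  std::printf("status: %s\n", cudaGetErrorString(cudaGetLastError()));

  cudaFree(d_x);
  cudaFree(d_y);
  return 0;
}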
e620ad3e9408ced7043f70c740268c775c6785bf.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_ENTRYWISE_LAYER_INSTANTIATE #include "lbann/layers/loss/entrywise.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { /** GPU kernel to apply an binary backprop operator. */ template <template <typename> class BinaryBackPropOperator, typename TensorDataType> __global__ void binary_backprop_operator_kernel(El::Int height, El::Int width, const TensorDataType* __restrict__ x1, El::Int x1_ldim, const TensorDataType* __restrict__ x2, El::Int x2_ldim, const TensorDataType* __restrict__ dy, El::Int dy_ldim, TensorDataType* __restrict__ dx1, El::Int dx1_ldim, TensorDataType* __restrict__ dx2, El::Int dx2_ldim) { const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x; const El::Int size = height * width; const El::Int num_threads = blockDim.x * gridDim.x; BinaryBackPropOperator<TensorDataType> op; for (El::Int pos = gid; pos < size; pos += num_threads) { const auto& row = pos % height; const auto& col = pos / height; op(x1[row + col * x1_ldim], x2[row + col * x2_ldim], dy[row + col * dy_ldim], dx1[row + col * dx1_ldim], dx2[row + col * dx2_ldim]); } } /** Apply a binary backprop operator to CPU data. * The input and output data must be on CPU and must have the same * dimensions. Given a binary function \f$ y = f(x_1,x_2) \f$, the * corresponding BinaryBackPropOperator is a 5-ary function with the * arguments \f$ x_1 \f$, \f$ x_2 \f$, \f$ dL/dy \f$, \f$ dL/dx_1\f$, * \f$ dL/dx_2 \f$. The last two arguments should be overwritten when * the BinaryBackPropOperator is called. */ template <template <typename> class Op, typename TensorDataType> void apply_binary_backprop_operator( const El::AbstractMatrix<TensorDataType>& x1, const El::AbstractMatrix<TensorDataType>& x2, const El::AbstractMatrix<TensorDataType>& dy, El::AbstractMatrix<TensorDataType>& dx1, El::AbstractMatrix<TensorDataType>& dx2) { // Get CUDA grid dimensions // Note: Maximum CUDA grid dimension is 2^32-1 // (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications). 
// TODO: HIP/ROCM notes const El::Int height = x1.Height(); const El::Int width = x1.Width(); const El::Int block_dim = 256; El::Int grid_dim = (height * width + block_dim - 1) / block_dim; if (sizeof(El::Int) > sizeof(unsigned int) && grid_dim > std::numeric_limits<uint32_t>::max()) { grid_dim = std::numeric_limits<uint32_t>::max(); } //Launch GPU kernel if (grid_dim > 0) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(dx2), gpu::get_sync_info(dx1), gpu::get_sync_info(dy), gpu::get_sync_info(x2), gpu::get_sync_info(x1)); hydrogen::gpu::LaunchKernel( binary_backprop_operator_kernel<Op, TensorDataType>, grid_dim, block_dim, 0, multisync, height, width, x1.LockedBuffer(), x1.LDim(), x2.LockedBuffer(), x2.LDim(), dy.LockedBuffer(), dy.LDim(), dx1.Buffer(), dx1.LDim(), dx2.Buffer(), dx2.LDim()); } } // ========================================================= // Operator objects for entry-wise binary layers // ========================================================= // Note: Binary operator corresponds to forward prop step // (\f$ y = f(x_1,x_2) \f$) and 5-ary operator corresponds // to back prop step // (\f$ \frac{dL}{dx_i} = \frac{dL}{dy} \frac{df}{dx_i}(x_1,x_2) \f$). /** Binary cross entropy operator. */ template <typename TensorDataType> struct binary_cross_entropy_op { inline __device__ TensorDataType operator()(const TensorDataType& x1, const TensorDataType& x2) const { const TensorDataType zero = 0.; const TensorDataType one = 1.; TensorDataType y = zero; if (x2 > zero) { y += -x2 * gpu_lib::log(x1); } if (x2 < one) { y += -(one-x2) * gpu_lib::log(one-x1); } return y; } inline __device__ void operator()(const TensorDataType& x1, const TensorDataType& x2, const TensorDataType& dy, TensorDataType& dx1, TensorDataType& dx2) const { const TensorDataType zero = 0.; const TensorDataType one = 1.; dx1 = zero; dx2 = zero; if (dy == zero) { return; } if (x2 > zero) { dx1 += -x2 / x1 * dy; dx2 += -gpu_lib::log(x1) * dy; } if (x2 < one) { dx1 += (one-x2) / (one-x1) * dy; dx2 += gpu_lib::log(one-x1) * dy; } } }; /** Sigmoid binary cross entropy operator. * Equivalent to applying a sigmoid function to the first operand and * then computing the binary cross entropy. Numerically stable * implementation is taken from * https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits. */ template <typename TensorDataType> struct sigmoid_binary_cross_entropy_op { inline __device__ TensorDataType operator()(const TensorDataType& x1, const TensorDataType& x2) const { const TensorDataType zero = 0.; const TensorDataType one = 1.; const auto& z = gpu_lib::max(zero, gpu_lib::min(x2, one)); if (x1 > zero) { return (one - z) * x1 + gpu_lib::log1p(gpu_lib::exp(-x1)); } else { return - x1 * z + gpu_lib::log1p(gpu_lib::exp(x1)); } } inline __device__ void operator()(const TensorDataType& x1, const TensorDataType& x2, const TensorDataType& dy, TensorDataType& dx1, TensorDataType& dx2) const { const TensorDataType zero = 0.; const TensorDataType one = 1.; const auto& z = gpu_lib::max(zero, gpu_lib::min(x2, one)); if (x1 > zero) { dx1 = -z + one / (one + gpu_lib::exp(-x1)); } else { dx1 = one - z - one / (one + gpu_lib::exp(x1)); } dx1 *= dy; dx2 = (x2 == z) ? -x1 * dy : zero; } }; /** Boolean accuracy operator. */ template <typename TensorDataType> struct boolean_accuracy_op { inline __device__ TensorDataType operator()(const TensorDataType& x1, const TensorDataType& x2) const { const auto& b1 = x1 >= TensorDataType(0.5); const auto& b2 = x2 >= TensorDataType(0.5); return b1 == b2 ? 
TensorDataType(1.0) : TensorDataType(0.0); } inline __device__ void operator()(const TensorDataType& x1, const TensorDataType& x2, const TensorDataType& dy, TensorDataType& dx1, TensorDataType& dx2) const { dx1 = TensorDataType(0.0); dx2 = TensorDataType(0.0); } }; /** Boolean false negative operator. */ template <typename TensorDataType> struct boolean_false_negative_op { inline __device__ TensorDataType operator()(const TensorDataType& x1, const TensorDataType& x2) const { const auto& b1 = x1 >= TensorDataType(0.5); const auto& b2 = x2 >= TensorDataType(0.5); return (!b1 && b2) ? TensorDataType(1.0) : TensorDataType(0.0); } inline __device__ void operator()(const TensorDataType& x1, const TensorDataType& x2, const TensorDataType& dy, TensorDataType& dx1, TensorDataType& dx2) const { dx1 = TensorDataType(0.0); dx2 = TensorDataType(0.0); } }; /** Boolean false positive operator. */ template <typename TensorDataType> struct boolean_false_positive_op { inline __device__ TensorDataType operator()(const TensorDataType& x1, const TensorDataType& x2) const { const auto& b1 = x1 >= TensorDataType(0.5); const auto& b2 = x2 >= TensorDataType(0.5); return (b1 && !b2) ? TensorDataType(1.0) : TensorDataType(0.0); } inline __device__ void operator()(const TensorDataType& x1, const TensorDataType& x2, const TensorDataType& dy, TensorDataType& dx1, TensorDataType& dx2) const { dx1 = TensorDataType(0.0); dx2 = TensorDataType(0.0); } }; } // namespace // Template instantiation #define DEFINE_COMPUTE_OPS(layer, op) \ template <typename TensorDataType, data_layout Layout, El::Device Device> \ void layer<TensorDataType, Layout, Device>::fp_compute() { \ gpu_lib::apply_entrywise_binary_operator<op>( \ this->get_prev_activations(0), \ this->get_prev_activations(1), \ this->get_activations()); \ } \ template <typename TensorDataType, data_layout Layout, El::Device Device> \ void layer<TensorDataType, Layout, Device>::bp_compute() { \ apply_binary_backprop_operator<op>( \ this->get_local_prev_activations(0), \ this->get_local_prev_activations(1), \ this->get_local_prev_error_signals(), \ this->get_local_error_signals(0), \ this->get_local_error_signals(1)); \ } \ DEFINE_COMPUTE_OPS(binary_cross_entropy_layer, binary_cross_entropy_op) DEFINE_COMPUTE_OPS(sigmoid_binary_cross_entropy_layer, sigmoid_binary_cross_entropy_op) DEFINE_COMPUTE_OPS(boolean_accuracy_layer, boolean_accuracy_op) DEFINE_COMPUTE_OPS(boolean_false_negative_layer, boolean_false_negative_op) DEFINE_COMPUTE_OPS(boolean_false_positive_layer, boolean_false_positive_op) #define PROTO(T) \ BINARY_ETI_INST_MACRO_DEV_DT(binary_cross_entropy_layer, T, El::Device::GPU); \ BINARY_ETI_INST_MACRO_DEV_DT(sigmoid_binary_cross_entropy_layer, T, El::Device::GPU); \ BINARY_ETI_INST_MACRO_DEV_DT(boolean_accuracy_layer, T, El::Device::GPU); \ BINARY_ETI_INST_MACRO_DEV_DT(boolean_false_negative_layer, T, El::Device::GPU); \ BINARY_ETI_INST_MACRO_DEV_DT(boolean_false_positive_layer, T, El::Device::GPU) #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
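sigmoid_binary_cross_entropy_op above credits its numerically stable form to the TensorFlow sigmoid_cross_entropy_with_logits write-up. A host-side sketch of the same branch-on-sign formulation, for reference next to the device operator: for logit x and target z in [0,1], the loss is (1-z)*x + log1p(exp(-x)) when x > 0 and -x*z + log1p(exp(x)) otherwise, which keeps exp() from overflowing for large |x|.

#include <cmath>
#include <cstdio>

double sigmoid_bce(double x, double z) {
  if (x > 0.0) return (1.0 - z) * x + std::log1p(std::exp(-x));
  return -x * z + std::log1p(std::exp(x));
}

int main() {
  // A naive -z*log(sigmoid(x)) - (1-z)*log(1-sigmoid(x)) loses precision or
  // overflows for large |x|; the stable form stays finite.
  std::printf("%f\n", sigmoid_bce(100.0, 1.0));    // confidently right: ~0
  std::printf("%f\n", sigmoid_bce(-100.0, 1.0));   // confidently wrong: ~100
  std::printf("%f\n", sigmoid_bce(0.0, 0.5));      // log(2) ~ 0.693147
  return 0;
}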
5bdef2b4dab1b3d4cf23d966267569a99763964e.hip
// !!! This is a file automatically generated by hipify!!! /** * correlation.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include "../../common/polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 1.05 #define GPU_DEVICE 0 /* Problem size */ //default is 2048, sclares upto 8192 //#define M 8192 #define M 2048 //#define N 8192 #define N 2048 /* Thread block dimensions for kernel 1*/ #define DIM_THREAD_BLOCK_KERNEL_1_X 256 #define DIM_THREAD_BLOCK_KERNEL_1_Y 1 /* Thread block dimensions for kernel 2*/ #define DIM_THREAD_BLOCK_KERNEL_2_X 256 #define DIM_THREAD_BLOCK_KERNEL_2_Y 1 /* Thread block dimensions for kernel 3*/ #define DIM_THREAD_BLOCK_KERNEL_3_X 32 #define DIM_THREAD_BLOCK_KERNEL_3_Y 8 /* Thread block dimensions for kernel 4*/ #define DIM_THREAD_BLOCK_KERNEL_4_X 256 #define DIM_THREAD_BLOCK_KERNEL_4_Y 1 #define sqrt_of_array_cell(x,j) sqrt(x[j]) #define FLOAT_N 3214212.01f #define EPS 0.005f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE* data) { int i, j; for (i=0; i < (M+1); i++) { for (j=0; j< (N+1); j++) { data[i*(N+1) + j] = ((DATA_TYPE) i*j)/ (M+1); } } } void correlation(DATA_TYPE* data, DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat) { int i, j, j1, j2; // Determine mean of column vectors of input data matrix for (j = 1; j < (M+1); j++) { mean[j] = 0.0; for (i = 1; i < (N+1); i++) { mean[j] += data[i*(M+1) + j]; } mean[j] /= (DATA_TYPE)FLOAT_N; } // Determine standard deviations of column vectors of data matrix. for (j = 1; j < (M+1); j++) { stddev[j] = 0.0; for (i = 1; i < (N+1); i++) { stddev[j] += (data[i*(M+1) + j] - mean[j]) * (data[i*(M+1) + j] - mean[j]); } stddev[j] /= FLOAT_N; stddev[j] = sqrt_of_array_cell(stddev, j); stddev[j] = stddev[j] <= EPS ? 1.0 : stddev[j]; } // Center and reduce the column vectors. for (i = 1; i < (N+1); i++) { for (j = 1; j < (M+1); j++) { data[i*(M+1) + j] -= mean[j]; data[i*(M+1) + j] /= (sqrt(FLOAT_N)*stddev[j]) ; } } // Calculate the m * m correlation matrix. 
for (j1 = 1; j1 < M; j1++) { symmat[j1*(M+1) + j1] = 1.0; for (j2 = j1+1; j2 < (M+1); j2++) { symmat[j1*(M+1) + j2] = 0.0; for (i = 1; i < (N+1); i++) { symmat[j1*(M+1) + j2] += (data[i*(M+1) + j1] * data[i*(M+1) + j2]); } symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2]; } } symmat[M*(M+1) + M] = 1.0; } void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu) { int i,j,fail; fail = 0; for (i=1; i < (M+1); i++) { for (j=1; j < (N+1); j++) { if (percentDiff(symmat[i*(N+1) + j], symmat_outputFromGpu[i*(N+1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; printf("i: %d j: %d\n1: %f 2: %f\n", i, j, symmat[i*N + j], symmat_outputFromGpu[i*N + j]); } } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); hipSetDevice( GPU_DEVICE ); } __global__ void mean_kernel(DATA_TYPE *mean, DATA_TYPE *data) { int j = blockIdx.x * blockDim.x + threadIdx.x + 1; if ((j >= 1) && (j < (M+1))) { mean[j] = 0.0; int i; for(i=1; i < (N+1); i++) { mean[j] += data[i*(M+1) + j]; } mean[j] /= (DATA_TYPE)FLOAT_N; } } __global__ void std_kernel(DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data) { int j = blockIdx.x * blockDim.x + threadIdx.x + 1; if ((j >= 1) && (j < (M+1))) { std[j] = 0.0; int i; for(i = 1; i < (N+1); i++) { std[j] += (data[i*(M+1) + j] - mean[j]) * (data[i*(M+1) + j] - mean[j]); } std[j] /= (FLOAT_N); std[j] = sqrt(std[j]); if(std[j] <= EPS) { std[j] = 1.0; } } } __global__ void reduce_kernel(DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data) { int j = blockIdx.x * blockDim.x + threadIdx.x + 1; int i = blockIdx.y * blockDim.y + threadIdx.y + 1; if ((i >= 1) && (i < (N+1)) && (j >= 1) && (j < (M+1))) { data[i*(M+1) + j] -= mean[j]; data[i*(M+1) + j] /= (sqrt(FLOAT_N) * std[j]); } } __global__ void corr_kernel(DATA_TYPE *symmat, DATA_TYPE *data) { int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1; int i, j2; if ((j1 >= 1) && (j1 < M)) { symmat[j1*(M+1) + j1] = 1.0; for (j2 = (j1 + 1); j2 < (M+1); j2++) { symmat[j1*(M+1) + j2] = 0.0; for(i = 1; i < (N+1); i++) { symmat[j1*(M+1) + j2] += data[i*(M+1) + j1] * data[i*(M+1) + j2]; } symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2]; } } } void correlationCuda(DATA_TYPE* data, DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu) { double t_start, t_end; DATA_TYPE *data_gpu; DATA_TYPE *stddev_gpu; DATA_TYPE *mean_gpu; DATA_TYPE *symmat_gpu; hipMalloc((void **)&data_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1)); hipMalloc((void **)&symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1)); hipMalloc((void **)&stddev_gpu, sizeof(DATA_TYPE) * (M+1)); hipMalloc((void **)&mean_gpu, sizeof(DATA_TYPE) * (M+1)); hipMemcpy(data_gpu, data, sizeof(DATA_TYPE) * (M+1) * (N+1), hipMemcpyHostToDevice); hipMemcpy(symmat_gpu, symmat, sizeof(DATA_TYPE) * (M+1) * (N+1), hipMemcpyHostToDevice); hipMemcpy(stddev_gpu, stddev, sizeof(DATA_TYPE) * (M+1), hipMemcpyHostToDevice); hipMemcpy(mean_gpu, mean, sizeof(DATA_TYPE) * (M+1), hipMemcpyHostToDevice); dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y); dim3 grid1((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_1_X)), 1); dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y); dim3 grid2((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X)), 1); dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, 
DIM_THREAD_BLOCK_KERNEL_3_Y); dim3 grid3((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_3_X)), (size_t)(ceil((float)(N)) / ((float)DIM_THREAD_BLOCK_KERNEL_3_Y))); dim3 block4(DIM_THREAD_BLOCK_KERNEL_4_X, DIM_THREAD_BLOCK_KERNEL_4_Y); dim3 grid4((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_4_X)), 1); t_start = rtclock(); hipLaunchKernelGGL(( mean_kernel), dim3(grid1), dim3(block1) , 0, 0, mean_gpu,data_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( std_kernel), dim3(grid2), dim3(block2) , 0, 0, mean_gpu,stddev_gpu,data_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( reduce_kernel), dim3(grid3), dim3(block3) , 0, 0, mean_gpu,stddev_gpu,data_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( corr_kernel), dim3(grid4), dim3(block4) , 0, 0, symmat_gpu,data_gpu); hipDeviceSynchronize(); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); DATA_TYPE valueAtSymmatIndexMTimesMPlus1PlusMPoint = 1.0; hipMemcpy(&(symmat_gpu[(M)*(M+1) + (M)]), &valueAtSymmatIndexMTimesMPlus1PlusMPoint, sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMemcpy(symmat_outputFromGpu, symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1), hipMemcpyDeviceToHost); hipFree(data_gpu); hipFree(symmat_gpu); hipFree(stddev_gpu); hipFree(mean_gpu); } int main() { double t_start, t_end; DATA_TYPE* data; DATA_TYPE* mean; DATA_TYPE* stddev; DATA_TYPE* symmat; DATA_TYPE* symmat_outputFromGpu; data = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE)); mean = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE)); stddev = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE)); symmat = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE)); symmat_outputFromGpu = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE)); init_arrays(data); GPU_argv_init(); correlationCuda(data, mean, stddev, symmat, symmat_outputFromGpu); t_start = rtclock(); // correlation(data, mean, stddev, symmat); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); // compareResults(symmat, symmat_outputFromGpu); free(data); free(mean); free(stddev); free(symmat); free(symmat_outputFromGpu); return 0; }
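The pipeline above follows the classic correlation recipe: column means, column standard deviations, center-and-scale, then column inner products in corr_kernel. With the mean and variance taken over the true sample count this yields the Pearson correlation of each column pair; a small host sketch of that per-pair quantity follows (PolyBench normalizes by its FLOAT_N constant instead of n, which this sketch does not reproduce).

#include <cmath>
#include <cstdio>

double pearson(const double *x, const double *y, int n) {
  double mx = 0.0, my = 0.0;
  for (int i = 0; i < n; ++i) { mx += x[i]; my += y[i]; }
  mx /= n; my /= n;
  double sxy = 0.0, sxx = 0.0, syy = 0.0;
  for (int i = 0; i < n; ++i) {
    sxy += (x[i] - mx) * (y[i] - my);
    sxx += (x[i] - mx) * (x[i] - mx);
    syy += (y[i] - my) * (y[i] - my);
  }
  return sxy / (std::sqrt(sxx) * std::sqrt(syy));
}

int main() {
  const double a[] = {1, 2, 3, 4};
  const double b[] = {2, 4, 6, 8};   // perfectly correlated with a
  const double c[] = {4, 3, 2, 1};   // perfectly anti-correlated with a
  std::printf("corr(a,b) = %.3f\n", pearson(a, b, 4));   // 1.000
  std::printf("corr(a,c) = %.3f\n", pearson(a, c, 4));   // -1.000
  return 0;
}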
5bdef2b4dab1b3d4cf23d966267569a99763964e.cu
/** * correlation.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <sys/time.h> #include <cuda.h> #include "../../common/polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 1.05 #define GPU_DEVICE 0 /* Problem size */ //default is 2048, sclares upto 8192 //#define M 8192 #define M 2048 //#define N 8192 #define N 2048 /* Thread block dimensions for kernel 1*/ #define DIM_THREAD_BLOCK_KERNEL_1_X 256 #define DIM_THREAD_BLOCK_KERNEL_1_Y 1 /* Thread block dimensions for kernel 2*/ #define DIM_THREAD_BLOCK_KERNEL_2_X 256 #define DIM_THREAD_BLOCK_KERNEL_2_Y 1 /* Thread block dimensions for kernel 3*/ #define DIM_THREAD_BLOCK_KERNEL_3_X 32 #define DIM_THREAD_BLOCK_KERNEL_3_Y 8 /* Thread block dimensions for kernel 4*/ #define DIM_THREAD_BLOCK_KERNEL_4_X 256 #define DIM_THREAD_BLOCK_KERNEL_4_Y 1 #define sqrt_of_array_cell(x,j) sqrt(x[j]) #define FLOAT_N 3214212.01f #define EPS 0.005f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE* data) { int i, j; for (i=0; i < (M+1); i++) { for (j=0; j< (N+1); j++) { data[i*(N+1) + j] = ((DATA_TYPE) i*j)/ (M+1); } } } void correlation(DATA_TYPE* data, DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat) { int i, j, j1, j2; // Determine mean of column vectors of input data matrix for (j = 1; j < (M+1); j++) { mean[j] = 0.0; for (i = 1; i < (N+1); i++) { mean[j] += data[i*(M+1) + j]; } mean[j] /= (DATA_TYPE)FLOAT_N; } // Determine standard deviations of column vectors of data matrix. for (j = 1; j < (M+1); j++) { stddev[j] = 0.0; for (i = 1; i < (N+1); i++) { stddev[j] += (data[i*(M+1) + j] - mean[j]) * (data[i*(M+1) + j] - mean[j]); } stddev[j] /= FLOAT_N; stddev[j] = sqrt_of_array_cell(stddev, j); stddev[j] = stddev[j] <= EPS ? 1.0 : stddev[j]; } // Center and reduce the column vectors. for (i = 1; i < (N+1); i++) { for (j = 1; j < (M+1); j++) { data[i*(M+1) + j] -= mean[j]; data[i*(M+1) + j] /= (sqrt(FLOAT_N)*stddev[j]) ; } } // Calculate the m * m correlation matrix. 
for (j1 = 1; j1 < M; j1++) { symmat[j1*(M+1) + j1] = 1.0; for (j2 = j1+1; j2 < (M+1); j2++) { symmat[j1*(M+1) + j2] = 0.0; for (i = 1; i < (N+1); i++) { symmat[j1*(M+1) + j2] += (data[i*(M+1) + j1] * data[i*(M+1) + j2]); } symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2]; } } symmat[M*(M+1) + M] = 1.0; } void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu) { int i,j,fail; fail = 0; for (i=1; i < (M+1); i++) { for (j=1; j < (N+1); j++) { if (percentDiff(symmat[i*(N+1) + j], symmat_outputFromGpu[i*(N+1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; printf("i: %d j: %d\n1: %f 2: %f\n", i, j, symmat[i*N + j], symmat_outputFromGpu[i*N + j]); } } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); cudaSetDevice( GPU_DEVICE ); } __global__ void mean_kernel(DATA_TYPE *mean, DATA_TYPE *data) { int j = blockIdx.x * blockDim.x + threadIdx.x + 1; if ((j >= 1) && (j < (M+1))) { mean[j] = 0.0; int i; for(i=1; i < (N+1); i++) { mean[j] += data[i*(M+1) + j]; } mean[j] /= (DATA_TYPE)FLOAT_N; } } __global__ void std_kernel(DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data) { int j = blockIdx.x * blockDim.x + threadIdx.x + 1; if ((j >= 1) && (j < (M+1))) { std[j] = 0.0; int i; for(i = 1; i < (N+1); i++) { std[j] += (data[i*(M+1) + j] - mean[j]) * (data[i*(M+1) + j] - mean[j]); } std[j] /= (FLOAT_N); std[j] = sqrt(std[j]); if(std[j] <= EPS) { std[j] = 1.0; } } } __global__ void reduce_kernel(DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data) { int j = blockIdx.x * blockDim.x + threadIdx.x + 1; int i = blockIdx.y * blockDim.y + threadIdx.y + 1; if ((i >= 1) && (i < (N+1)) && (j >= 1) && (j < (M+1))) { data[i*(M+1) + j] -= mean[j]; data[i*(M+1) + j] /= (sqrt(FLOAT_N) * std[j]); } } __global__ void corr_kernel(DATA_TYPE *symmat, DATA_TYPE *data) { int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1; int i, j2; if ((j1 >= 1) && (j1 < M)) { symmat[j1*(M+1) + j1] = 1.0; for (j2 = (j1 + 1); j2 < (M+1); j2++) { symmat[j1*(M+1) + j2] = 0.0; for(i = 1; i < (N+1); i++) { symmat[j1*(M+1) + j2] += data[i*(M+1) + j1] * data[i*(M+1) + j2]; } symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2]; } } } void correlationCuda(DATA_TYPE* data, DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu) { double t_start, t_end; DATA_TYPE *data_gpu; DATA_TYPE *stddev_gpu; DATA_TYPE *mean_gpu; DATA_TYPE *symmat_gpu; cudaMalloc((void **)&data_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1)); cudaMalloc((void **)&symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1)); cudaMalloc((void **)&stddev_gpu, sizeof(DATA_TYPE) * (M+1)); cudaMalloc((void **)&mean_gpu, sizeof(DATA_TYPE) * (M+1)); cudaMemcpy(data_gpu, data, sizeof(DATA_TYPE) * (M+1) * (N+1), cudaMemcpyHostToDevice); cudaMemcpy(symmat_gpu, symmat, sizeof(DATA_TYPE) * (M+1) * (N+1), cudaMemcpyHostToDevice); cudaMemcpy(stddev_gpu, stddev, sizeof(DATA_TYPE) * (M+1), cudaMemcpyHostToDevice); cudaMemcpy(mean_gpu, mean, sizeof(DATA_TYPE) * (M+1), cudaMemcpyHostToDevice); dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y); dim3 grid1((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_1_X)), 1); dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y); dim3 grid2((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X)), 1); dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, 
DIM_THREAD_BLOCK_KERNEL_3_Y); dim3 grid3((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_3_X)), (size_t)(ceil((float)(N)) / ((float)DIM_THREAD_BLOCK_KERNEL_3_Y))); dim3 block4(DIM_THREAD_BLOCK_KERNEL_4_X, DIM_THREAD_BLOCK_KERNEL_4_Y); dim3 grid4((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_4_X)), 1); t_start = rtclock(); mean_kernel<<< grid1, block1 >>>(mean_gpu,data_gpu); cudaThreadSynchronize(); std_kernel<<< grid2, block2 >>>(mean_gpu,stddev_gpu,data_gpu); cudaThreadSynchronize(); reduce_kernel<<< grid3, block3 >>>(mean_gpu,stddev_gpu,data_gpu); cudaThreadSynchronize(); corr_kernel<<< grid4, block4 >>>(symmat_gpu,data_gpu); cudaThreadSynchronize(); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); DATA_TYPE valueAtSymmatIndexMTimesMPlus1PlusMPoint = 1.0; cudaMemcpy(&(symmat_gpu[(M)*(M+1) + (M)]), &valueAtSymmatIndexMTimesMPlus1PlusMPoint, sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMemcpy(symmat_outputFromGpu, symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1), cudaMemcpyDeviceToHost); cudaFree(data_gpu); cudaFree(symmat_gpu); cudaFree(stddev_gpu); cudaFree(mean_gpu); } int main() { double t_start, t_end; DATA_TYPE* data; DATA_TYPE* mean; DATA_TYPE* stddev; DATA_TYPE* symmat; DATA_TYPE* symmat_outputFromGpu; data = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE)); mean = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE)); stddev = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE)); symmat = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE)); symmat_outputFromGpu = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE)); init_arrays(data); GPU_argv_init(); correlationCuda(data, mean, stddev, symmat, symmat_outputFromGpu); t_start = rtclock(); // correlation(data, mean, stddev, symmat); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); // compareResults(symmat, symmat_outputFromGpu); free(data); free(mean); free(stddev); free(symmat); free(symmat_outputFromGpu); return 0; }
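/*
 * Editorial note (hedged sketch, not taken from the original source):
 * cudaThreadSynchronize(), used between the launches above, is deprecated in
 * favour of cudaDeviceSynchronize(), and neither launch-time nor asynchronous
 * kernel errors are checked.  One minimal pattern, assuming only the CUDA
 * runtime API; the macro name CHECK_KERNEL is illustrative and does not exist
 * in the original code.
 */
#define CHECK_KERNEL()                                                        \
	do {                                                                      \
		cudaError_t e = cudaGetLastError();          /* launch-time errors */ \
		if (e == cudaSuccess) e = cudaDeviceSynchronize(); /* async errors */ \
		if (e != cudaSuccess) {                                               \
			fprintf(stderr, "CUDA error %s at %s:%d\n",                       \
			        cudaGetErrorString(e), __FILE__, __LINE__);              \
			exit(EXIT_FAILURE);                                               \
		}                                                                     \
	} while (0)

/* Usage after each launch, e.g.:
 *     mean_kernel<<< grid1, block1 >>>(mean_gpu, data_gpu);
 *     CHECK_KERNEL();
 */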
a31c3b1b19931e29b5985ab7c390957e752f072e.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <sys/time.h> #define CHECK(call) { \ hipError_t err; \ if ( (err = (call)) != hipSuccess) { \ fprintf(stderr, "Got error %s at %s:%d\n", hipGetErrorString(err), \ __FILE__, __LINE__); \ exit(1); \ } \ } __global__ void kernel2(int *a, int *b, int c) { int tx = threadIdx.x; switch (tx) { case 0: a[tx] = a[tx] + 2; break; case 1: a[tx] = a[tx] + 3; break; default: break; } } __global__ void kernel(float *g_data, float value) { int idx = blockIdx.x * blockDim.x + threadIdx.x; g_data[idx] = g_data[idx] + value; // printf("%f+g_data[%d]=%f\n", value, idx, g_data[idx]); } int checkResult(float *data, const int n, const float x) { for (int i = 0; i < n; i++) { if (data[i] != x) { printf("Error! data[%d] = %f, ref = %f\n", i, data[i], x); return 0; } } return 1; } void test() { printf("sizeof(hipEvent_t)=%lu\n", sizeof(hipEvent_t)); printf("sizeof(hipStream_t)=%lu\n", sizeof(hipStream_t)); printf("sizeof(hipError_t)=%lu\n", sizeof(hipError_t)); printf("sizeof(uint64_t)=%lu\n", sizeof(uint64_t)); printf("sizeof(uint32_t)=%lu\n", sizeof(uint32_t)); } #define TIMING static double tvsub(struct timeval start, struct timeval end) { return (double)(end.tv_usec - start.tv_usec)/1000000 + (double)(end.tv_sec - start.tv_sec); } int main() { /* test case 1 * for __cudaRegisterFatBinary __cudaUnregisterFatBinary __cudaRegisterFunction */ // return 0; int devID=1; int count = 0; struct hipDeviceProp_t props; float *d_a=0; float *h_a=0; dim3 block, grid; int num = 1 << 24; int nbytes = num * sizeof(float); int value=41; struct timeval malloc_start, malloc_end; struct timeval meminit_start, meminit_end; struct timeval free_start, free_end; struct timeval d_malloc_start, d_malloc_end; struct timeval d_meminit_start, d_meminit_end; struct timeval d_free_start, d_free_end; struct timeval HtoD_start, HtoD_end; struct timeval DtoH_start, DtoH_end; struct timeval kernel_start, kernel_end; struct timeval total_start, total_end; //test(); /* test case 2 * add hipGetDeviceCount hipGetDevice hipGetDeviceProperties */ devID = 0; CHECK(hipSetDevice(devID)); CHECK(hipGetDeviceCount(&count)); printf("cuda count=%d\n", count); // CHECK(hipGetDevice(&devID)); CHECK(hipGetDeviceProperties(&props, devID)); printf("Device %d: \"%s\" with Compute %d.%d capability\n",devID, props.name, props.major, props.minor); // printf("num 0x%x\n", num); #ifdef TIMING gettimeofday(&total_start, NULL); #endif printf("sending 0x%x\n", nbytes); printf("allocating 0x%x\n", nbytes); #ifdef TIMING gettimeofday(&malloc_start, NULL); #endif h_a=(float*)malloc(nbytes); #ifdef TIMING gettimeofday(&malloc_end, NULL); #endif printf("initing mem\n"); #ifdef TIMING gettimeofday(&meminit_start, NULL); #endif memset(h_a, 0, nbytes); #ifdef TIMING gettimeofday(&meminit_end, NULL); #endif printf("h_a=%p\n", h_a); // h_a[0] = 1; // start #ifdef TIMING gettimeofday(&d_malloc_start, NULL); #endif CHECK(hipMalloc((void**)&d_a, nbytes)); #ifdef TIMING gettimeofday(&d_malloc_end, NULL); #endif printf("d_a address = %p\n", d_a); #ifdef TIMING gettimeofday(&d_meminit_start, NULL); #endif CHECK(hipMemset(d_a, 0, nbytes)); #ifdef TIMING gettimeofday(&d_meminit_end, NULL); #endif // set kernel launch configuration block = dim3(4); grid = dim3((num + block.x - 1) / block.x); #ifdef TIMING gettimeofday(&HtoD_start, NULL); #endif // CHECK(hipMemcpy(d_a, h_a, nbytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_a, h_a, nbytes, hipMemcpyDefault)); #ifdef TIMING 
gettimeofday(&HtoD_end, NULL); #endif #ifdef TIMING gettimeofday(&kernel_start, NULL); #endif hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, 0, d_a, value); #ifdef TIMING gettimeofday(&kernel_end, NULL); #endif #ifdef TIMING gettimeofday(&DtoH_start, NULL); #endif // CHECK(hipMemcpy(h_a, d_a, nbytes, hipMemcpyDeviceToHost)); CHECK(hipMemcpy(h_a, d_a, nbytes, hipMemcpyDefault)); #ifdef TIMING gettimeofday(&DtoH_end, NULL); #endif bool bFinalResults = (bool) checkResult(h_a, num, value); printf("result:%s\n", bFinalResults? "PASS" : "FAILED"); // end #ifdef TIMING gettimeofday(&d_free_start, NULL); #endif CHECK(hipFree(d_a)); #ifdef TIMING gettimeofday(&d_free_end, NULL); #endif #ifdef TIMING gettimeofday(&free_start, NULL); #endif free(h_a); #ifdef TIMING gettimeofday(&free_end, NULL); #endif /* test case 3 * add hipMalloc hipMemset hipMemcpy hipLaunch hipFree */ #ifdef TIMING gettimeofday(&total_end, NULL); double total_time = tvsub(total_start, total_end); double malloc_time = tvsub(malloc_start, malloc_end); double meminit_time = tvsub(meminit_start, meminit_end); double free_time = tvsub(free_start, free_end); double d_malloc_time = tvsub(d_malloc_start, d_malloc_end); double d_meminit_time = tvsub(d_meminit_start, d_meminit_end); double d_free_time = tvsub(d_free_start, d_free_end); double HtoD_time = tvsub(HtoD_start, HtoD_end); double DtoH_time = tvsub(DtoH_start, DtoH_end); double kernel_time = tvsub(kernel_start, kernel_end); printf("================\n"); printf("total_time : \t\t%f\n", total_time); printf("host malloc: \t\t%f\n", malloc_time); printf("host mem init: \t\t%f\n", meminit_time); printf("device malloc: \t\t%f\n", d_malloc_time); printf("device mem init: \t%f\n", d_meminit_time); printf("HtoD: \t\t\t%f\n", HtoD_time); printf("Exec: \t\t\t%f\n", kernel_time); printf("DtoH: \t\t\t%f\n", DtoH_time); printf("device free: \t\t%f\n", d_free_time); printf("host free: \t\t%f\n", free_time); printf("================\n"); #endif return EXIT_SUCCESS; }
a31c3b1b19931e29b5985ab7c390957e752f072e.cu
#include <cuda.h> #include <stdio.h> #include <sys/time.h> #define CHECK(call) { \ cudaError_t err; \ if ( (err = (call)) != cudaSuccess) { \ fprintf(stderr, "Got error %s at %s:%d\n", cudaGetErrorString(err), \ __FILE__, __LINE__); \ exit(1); \ } \ } __global__ void kernel2(int *a, int *b, int c) { int tx = threadIdx.x; switch (tx) { case 0: a[tx] = a[tx] + 2; break; case 1: a[tx] = a[tx] + 3; break; default: break; } } __global__ void kernel(float *g_data, float value) { int idx = blockIdx.x * blockDim.x + threadIdx.x; g_data[idx] = g_data[idx] + value; // printf("%f+g_data[%d]=%f\n", value, idx, g_data[idx]); } int checkResult(float *data, const int n, const float x) { for (int i = 0; i < n; i++) { if (data[i] != x) { printf("Error! data[%d] = %f, ref = %f\n", i, data[i], x); return 0; } } return 1; } void test() { printf("sizeof(cudaEvent_t)=%lu\n", sizeof(cudaEvent_t)); printf("sizeof(cudaStream_t)=%lu\n", sizeof(cudaStream_t)); printf("sizeof(cudaError_t)=%lu\n", sizeof(cudaError_t)); printf("sizeof(uint64_t)=%lu\n", sizeof(uint64_t)); printf("sizeof(uint32_t)=%lu\n", sizeof(uint32_t)); } #define TIMING static double tvsub(struct timeval start, struct timeval end) { return (double)(end.tv_usec - start.tv_usec)/1000000 + (double)(end.tv_sec - start.tv_sec); } int main() { /* test case 1 * for __cudaRegisterFatBinary __cudaUnregisterFatBinary __cudaRegisterFunction */ // return 0; int devID=1; int count = 0; struct cudaDeviceProp props; float *d_a=0; float *h_a=0; dim3 block, grid; int num = 1 << 24; int nbytes = num * sizeof(float); int value=41; struct timeval malloc_start, malloc_end; struct timeval meminit_start, meminit_end; struct timeval free_start, free_end; struct timeval d_malloc_start, d_malloc_end; struct timeval d_meminit_start, d_meminit_end; struct timeval d_free_start, d_free_end; struct timeval HtoD_start, HtoD_end; struct timeval DtoH_start, DtoH_end; struct timeval kernel_start, kernel_end; struct timeval total_start, total_end; //test(); /* test case 2 * add cudaGetDeviceCount cudaGetDevice cudaGetDeviceProperties */ devID = 0; CHECK(cudaSetDevice(devID)); CHECK(cudaGetDeviceCount(&count)); printf("cuda count=%d\n", count); // CHECK(cudaGetDevice(&devID)); CHECK(cudaGetDeviceProperties(&props, devID)); printf("Device %d: \"%s\" with Compute %d.%d capability\n",devID, props.name, props.major, props.minor); // printf("num 0x%x\n", num); #ifdef TIMING gettimeofday(&total_start, NULL); #endif printf("sending 0x%x\n", nbytes); printf("allocating 0x%x\n", nbytes); #ifdef TIMING gettimeofday(&malloc_start, NULL); #endif h_a=(float*)malloc(nbytes); #ifdef TIMING gettimeofday(&malloc_end, NULL); #endif printf("initing mem\n"); #ifdef TIMING gettimeofday(&meminit_start, NULL); #endif memset(h_a, 0, nbytes); #ifdef TIMING gettimeofday(&meminit_end, NULL); #endif printf("h_a=%p\n", h_a); // h_a[0] = 1; // start #ifdef TIMING gettimeofday(&d_malloc_start, NULL); #endif CHECK(cudaMalloc((void**)&d_a, nbytes)); #ifdef TIMING gettimeofday(&d_malloc_end, NULL); #endif printf("d_a address = %p\n", d_a); #ifdef TIMING gettimeofday(&d_meminit_start, NULL); #endif CHECK(cudaMemset(d_a, 0, nbytes)); #ifdef TIMING gettimeofday(&d_meminit_end, NULL); #endif // set kernel launch configuration block = dim3(4); grid = dim3((num + block.x - 1) / block.x); #ifdef TIMING gettimeofday(&HtoD_start, NULL); #endif // CHECK(cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyDefault)); #ifdef TIMING gettimeofday(&HtoD_end, NULL); #endif #ifdef TIMING 
gettimeofday(&kernel_start, NULL); #endif kernel<<<grid, block>>>(d_a, value); #ifdef TIMING gettimeofday(&kernel_end, NULL); #endif #ifdef TIMING gettimeofday(&DtoH_start, NULL); #endif // CHECK(cudaMemcpy(h_a, d_a, nbytes, cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(h_a, d_a, nbytes, cudaMemcpyDefault)); #ifdef TIMING gettimeofday(&DtoH_end, NULL); #endif bool bFinalResults = (bool) checkResult(h_a, num, value); printf("result:%s\n", bFinalResults? "PASS" : "FAILED"); // end #ifdef TIMING gettimeofday(&d_free_start, NULL); #endif CHECK(cudaFree(d_a)); #ifdef TIMING gettimeofday(&d_free_end, NULL); #endif #ifdef TIMING gettimeofday(&free_start, NULL); #endif free(h_a); #ifdef TIMING gettimeofday(&free_end, NULL); #endif /* test case 3 * add cudaMalloc cudaMemset cudaMemcpy cudaLaunch cudaFree */ #ifdef TIMING gettimeofday(&total_end, NULL); double total_time = tvsub(total_start, total_end); double malloc_time = tvsub(malloc_start, malloc_end); double meminit_time = tvsub(meminit_start, meminit_end); double free_time = tvsub(free_start, free_end); double d_malloc_time = tvsub(d_malloc_start, d_malloc_end); double d_meminit_time = tvsub(d_meminit_start, d_meminit_end); double d_free_time = tvsub(d_free_start, d_free_end); double HtoD_time = tvsub(HtoD_start, HtoD_end); double DtoH_time = tvsub(DtoH_start, DtoH_end); double kernel_time = tvsub(kernel_start, kernel_end); printf("================\n"); printf("total_time : \t\t%f\n", total_time); printf("host malloc: \t\t%f\n", malloc_time); printf("host mem init: \t\t%f\n", meminit_time); printf("device malloc: \t\t%f\n", d_malloc_time); printf("device mem init: \t%f\n", d_meminit_time); printf("HtoD: \t\t\t%f\n", HtoD_time); printf("Exec: \t\t\t%f\n", kernel_time); printf("DtoH: \t\t\t%f\n", DtoH_time); printf("device free: \t\t%f\n", d_free_time); printf("host free: \t\t%f\n", free_time); printf("================\n"); #endif return EXIT_SUCCESS; }
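/*
 * Editorial note (hedged sketch, not from the original file): the
 * gettimeofday() pair around the launch above measures only launch overhead,
 * because kernel<<<grid, block>>>() returns before the GPU finishes; the
 * execution time then leaks into the following DtoH measurement.  The minimal
 * fix is a cudaDeviceSynchronize() before gettimeofday(&kernel_end, NULL).  A
 * device-side alternative with CUDA events is sketched below; the helper name
 * time_kernel_ms is illustrative and assumes only the runtime API plus the
 * kernel() defined in this file.
 */
static float time_kernel_ms(float *d_a, float value, dim3 grid, dim3 block)
{
	cudaEvent_t start, stop;
	float ms = 0.0f;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);               /* enqueued before the launch   */
	kernel<<<grid, block>>>(d_a, value);
	cudaEventRecord(stop, 0);                /* enqueued after the launch    */
	cudaEventSynchronize(stop);              /* wait for the GPU to finish   */
	cudaEventElapsedTime(&ms, start, stop);  /* elapsed time in milliseconds */
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return ms;
}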
12fcc70492225791e98346f78394d216cdfe8c9a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* **********************************************
 * CS314 Principles of Programming Languages    *
 * Spring 2020                                  *
 **********************************************
 */
#include <stdio.h>
#include <stdlib.h>

__global__ void collateSegments_gpu(int * src, int * scanResult, int * output, int numEdges)
{
	int allThreads = blockDim.x * gridDim.x;
	int tid = blockIdx.x * blockDim.x + threadIdx.x;

	// Grid-stride loop: each thread handles edges tid, tid + allThreads, ...
	for (int i = tid; i < numEdges; i += allThreads) {
		// An edge closes a segment if it is the last edge overall or if the
		// next edge belongs to a different source vertex.  Checking i against
		// numEdges - 1 first also avoids reading src[i + 1] past the end of
		// the array, which the original condition did.
		if (i == numEdges - 1 || src[i] != src[i + 1]) {
			output[src[i]] = scanResult[i];
		}
	}
}
12fcc70492225791e98346f78394d216cdfe8c9a.cu
/* **********************************************
 * CS314 Principles of Programming Languages    *
 * Spring 2020                                  *
 **********************************************
 */
#include <stdio.h>
#include <stdlib.h>

__global__ void collateSegments_gpu(int * src, int * scanResult, int * output, int numEdges)
{
	int allThreads = blockDim.x * gridDim.x;
	int tid = blockIdx.x * blockDim.x + threadIdx.x;

	// Grid-stride loop: each thread handles edges tid, tid + allThreads, ...
	for (int i = tid; i < numEdges; i += allThreads) {
		// An edge closes a segment if it is the last edge overall or if the
		// next edge belongs to a different source vertex; checking i first
		// avoids the out-of-bounds read of src[i + 1] on the final edge.
		if (i == numEdges - 1 || src[i] != src[i + 1]) {
			output[src[i]] = scanResult[i];
		}
	}
}
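/*
 * Editorial note (hedged sketch, not part of the assignment files): a
 * host-side reference for the kernel above.  With edges sorted by source
 * vertex, the last edge of each source segment carries that segment's final
 * scan value, so output[v] ends up holding the scan result at the end of
 * vertex v's edge list.  For example,
 *     src        = {0, 0, 1, 1, 1, 3}
 *     scanResult = {2, 5, 1, 4, 9, 7}
 * yields output[0] = 5, output[1] = 9, output[3] = 7.
 */
static void collateSegments_cpu(const int *src, const int *scanResult,
                                int *output, int numEdges)
{
	for (int i = 0; i < numEdges; i++) {
		if (i == numEdges - 1 || src[i] != src[i + 1]) {
			output[src[i]] = scanResult[i];
		}
	}
}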
1d76385611313965f9d65ed1d60b0ca8c7a280cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "network.h" #include "detection_layer.h" #include "cost_layer.h" #include "utils.h" #include "parser.h" #include "box.h" #include "image.h" #include <sys/time.h> } #ifdef OPENCV #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #define NCLASSES 22 extern "C" image ipl_to_image(IplImage* src); extern "C" void convert_yolo_detections(float *predictions, int classes, int num, int square, int side, int w, int h, float thresh, float **probs, box *boxes, int only_objectness); extern "C" void draw_yolo(image im, int num, float thresh, box *boxes, float **probs); extern "C" char *voc_names[]; extern "C" image voc_labels[]; static float **probs; static box *boxes; static network net; static image in ; static image in_s ; static image det ; static image det_s; static image disp ; static cv::VideoCapture cap; static float fps = 0; static float demo_thresh = 0; void *fetch_in_thread(void *ptr) { cv::Mat frame_m; cap >> frame_m; IplImage frame = frame_m; in = ipl_to_image(&frame); rgbgr_image(in); in_s = resize_image(in, net.w, net.h); return 0; } void *detect_in_thread(void *ptr) { float nms = .4; detection_layer l = net.layers[net.n-1]; float *X = det_s.data; float *predictions = network_predict(net, X); free_image(det_s); convert_yolo_detections(predictions, l.classes, l.n, l.sqrt, l.side, 1, 1, demo_thresh, probs, boxes, 0); if (nms > 0) do_nms(boxes, probs, l.side*l.side*l.n, l.classes, nms); printf("\033[2J"); printf("\033[1;1H"); printf("\nFPS:%.0f\n",fps); printf("Objects:\n\n"); draw_detections(det, l.side*l.side*l.n, demo_thresh, boxes, probs, voc_names, voc_labels, NCLASSES); return 0; } extern "C" void demo_yolo(char *cfgfile, char *weightfile, float thresh, int cam_index) { demo_thresh = thresh; printf("YOLO demo\n"); net = parse_network_cfg(cfgfile); if(weightfile){ load_weights(&net, weightfile); } set_batch_network(&net, 1); srand(2222222); cv::VideoCapture cam(cam_index); cap = cam; if(!cap.isOpened()) error("Couldn't connect to webcam.\n"); detection_layer l = net.layers[net.n-1]; int j; boxes = (box *)calloc(l.side*l.side*l.n, sizeof(box)); probs = (float **)calloc(l.side*l.side*l.n, sizeof(float *)); for(j = 0; j < l.side*l.side*l.n; ++j) probs[j] = (float *)calloc(l.classes, sizeof(float *)); pthread_t fetch_thread; pthread_t detect_thread; fetch_in_thread(0); det = in; det_s = in_s; fetch_in_thread(0); detect_in_thread(0); disp = det; det = in; det_s = in_s; while(1){ struct timeval tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); if(pthread_create(&fetch_thread, 0, fetch_in_thread, 0)) error("Thread creation failed"); if(pthread_create(&detect_thread, 0, detect_in_thread, 0)) error("Thread creation failed"); show_image(disp, "YOLO"); free_image(disp); cvWaitKey(1); pthread_join(fetch_thread, 0); pthread_join(detect_thread, 0); disp = det; det = in; det_s = in_s; gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); float curr = 1000000.f/((long int)tval_result.tv_usec); fps = .9*fps + .1*curr; } } #else extern "C" void demo_yolo(char *cfgfile, char *weightfile, float thresh, int cam_index){ fprintf(stderr, "YOLO demo needs OpenCV for webcam images.\n"); } #endif
1d76385611313965f9d65ed1d60b0ca8c7a280cc.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "network.h" #include "detection_layer.h" #include "cost_layer.h" #include "utils.h" #include "parser.h" #include "box.h" #include "image.h" #include <sys/time.h> } #ifdef OPENCV #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #define NCLASSES 22 extern "C" image ipl_to_image(IplImage* src); extern "C" void convert_yolo_detections(float *predictions, int classes, int num, int square, int side, int w, int h, float thresh, float **probs, box *boxes, int only_objectness); extern "C" void draw_yolo(image im, int num, float thresh, box *boxes, float **probs); extern "C" char *voc_names[]; extern "C" image voc_labels[]; static float **probs; static box *boxes; static network net; static image in ; static image in_s ; static image det ; static image det_s; static image disp ; static cv::VideoCapture cap; static float fps = 0; static float demo_thresh = 0; void *fetch_in_thread(void *ptr) { cv::Mat frame_m; cap >> frame_m; IplImage frame = frame_m; in = ipl_to_image(&frame); rgbgr_image(in); in_s = resize_image(in, net.w, net.h); return 0; } void *detect_in_thread(void *ptr) { float nms = .4; detection_layer l = net.layers[net.n-1]; float *X = det_s.data; float *predictions = network_predict(net, X); free_image(det_s); convert_yolo_detections(predictions, l.classes, l.n, l.sqrt, l.side, 1, 1, demo_thresh, probs, boxes, 0); if (nms > 0) do_nms(boxes, probs, l.side*l.side*l.n, l.classes, nms); printf("\033[2J"); printf("\033[1;1H"); printf("\nFPS:%.0f\n",fps); printf("Objects:\n\n"); draw_detections(det, l.side*l.side*l.n, demo_thresh, boxes, probs, voc_names, voc_labels, NCLASSES); return 0; } extern "C" void demo_yolo(char *cfgfile, char *weightfile, float thresh, int cam_index) { demo_thresh = thresh; printf("YOLO demo\n"); net = parse_network_cfg(cfgfile); if(weightfile){ load_weights(&net, weightfile); } set_batch_network(&net, 1); srand(2222222); cv::VideoCapture cam(cam_index); cap = cam; if(!cap.isOpened()) error("Couldn't connect to webcam.\n"); detection_layer l = net.layers[net.n-1]; int j; boxes = (box *)calloc(l.side*l.side*l.n, sizeof(box)); probs = (float **)calloc(l.side*l.side*l.n, sizeof(float *)); for(j = 0; j < l.side*l.side*l.n; ++j) probs[j] = (float *)calloc(l.classes, sizeof(float *)); pthread_t fetch_thread; pthread_t detect_thread; fetch_in_thread(0); det = in; det_s = in_s; fetch_in_thread(0); detect_in_thread(0); disp = det; det = in; det_s = in_s; while(1){ struct timeval tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); if(pthread_create(&fetch_thread, 0, fetch_in_thread, 0)) error("Thread creation failed"); if(pthread_create(&detect_thread, 0, detect_in_thread, 0)) error("Thread creation failed"); show_image(disp, "YOLO"); free_image(disp); cvWaitKey(1); pthread_join(fetch_thread, 0); pthread_join(detect_thread, 0); disp = det; det = in; det_s = in_s; gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); float curr = 1000000.f/((long int)tval_result.tv_usec); fps = .9*fps + .1*curr; } } #else extern "C" void demo_yolo(char *cfgfile, char *weightfile, float thresh, int cam_index){ fprintf(stderr, "YOLO demo needs OpenCV for webcam images.\n"); } #endif