Dataset columns: hip_filename (string, 5–84 chars), hip_content (string, 79–9.69M chars), cuda_filename (string, 4–83 chars), cuda_content (string, 19–9.69M chars). Each row pairs a HIP source file produced by hipify with the original CUDA source it was generated from.
b74f429e0072674ae97f27b4b150e2088ef0e5d8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include "cusolverRf.h" #define TEST_PASSED 0 #define TEST_FAILED 1 int main (void){ /* matrix A */ int n; int nnzA; int *Ap=NULL; int *Ai=NULL; double *Ax=NULL; int *d_Ap=NULL; int *d_Ai=NULL; double *d_rAx=NULL; /* matrices L and U */ int nnzL, nnzU; int *Lp=NULL; int *Li=NULL; double* Lx=NULL; int *Up=NULL; int *Ui=NULL; double* Ux=NULL; /* reordering matrices */ int *P=NULL; int *Q=NULL; int * d_P=NULL; int * d_Q=NULL; /* solution and rhs */ int nrhs; //# of rhs for each system (currently only =1 is supported) double *d_X=NULL; double *d_T=NULL; /* cuda */ hipError_t cudaStatus; /* cuolverRf */ cusolverRfHandle_t gH=NULL; cusolverStatus_t status; /* host sparse direct solver */ /* ... */ /* other variables */ int tnnzL, tnnzU; int *tLp=NULL; int *tLi=NULL; double *tLx=NULL; int *tUp=NULL; int *tUi=NULL; double *tUx=NULL; clock_t t1, t2; /* ASSUMPTION: recall that we are solving a set of linear systems A_{i} x_{i} = f_{i} for i=0,...,k-1 where the sparsity pattern of the coefficient matrices A_{i} as well as the reordering to minimize fill-in and the pivoting used during the LU factorization remain the same. */ /* Step 1: solve the first linear system (i=0) on the host, using host sparse direct solver, which involves full LU factorization and solve. */ /* ... */ /* Step 2: interface to the library by extracting the following information from the first solve: a) triangular factors L and U b) pivoting and reordering permutations P and Q c) also, allocate all the necessary memory */ /* ... */ /* Step 3: use the library to solve subsequent (i=1,...,k-1) linear systems a) the library setup (called only once) */ //create handle status = cusolverRfCreate(&gH); if (status != CUSOLVER_STATUS_SUCCESS){ printf ("[cusolverRf status %d]\n",status); return TEST_FAILED; } //set fast mode status = cusolverRfSetResetValuesFastMode(gH,CUSOLVERRF_RESET_VALUES_FAST_MODE_ON); if (status != CUSOLVER_STATUS_SUCCESS){ printf ("[cusolverRf status %d]\n",status); return TEST_FAILED; } //assemble internal data structures (you should use the coeffcient matrix A //corresponding to the second (i=1) linear system in this call) t1 = clock(); status = cusolverRfSetupHost(n, nnzA, Ap, Ai, Ax, nnzL, Lp, Li, Lx, nnzU, Up, Ui, Ux, P, Q, gH); cudaStatus = hipDeviceSynchronize(); t2 = clock(); if ((status != CUSOLVER_STATUS_SUCCESS) || (cudaStatus != hipSuccess)) { printf ("[cusolverRf status %d]\n",status); return TEST_FAILED; } printf("cusolverRfSetupHost time = %f (s)\n", (t2-t1)/(float)CLOCKS_PER_SEC); //analyze available parallelism t1 = clock(); status = cusolverRfAnalyze(gH); cudaStatus = hipDeviceSynchronize(); t2 = clock(); if ((status != CUSOLVER_STATUS_SUCCESS) || (cudaStatus != hipSuccess)) { printf ("[cusolverRf status %d]\n",status); return TEST_FAILED; } printf("cusolverRfAnalyze time = %f (s)\n", (t2-t1)/(float)CLOCKS_PER_SEC); /* b) The library subsequent (i=1,...,k-1) LU re-factorization and solve (called multiple times). 
*/ int k = 2; for (int i=1; i<k; i++){ //LU re-factorization t1 = clock(); status = cusolverRfRefactor(gH); cudaStatus = hipDeviceSynchronize(); t2 = clock(); if ((status != CUSOLVER_STATUS_SUCCESS) || (cudaStatus != hipSuccess)) { printf ("[cusolverRF status %d]\n",status); return TEST_FAILED; } printf("cuSolverReRefactor time = %f (s)\n", (t2-t1)/(float)CLOCKS_PER_SEC); //forward and backward solve t1 = clock(); status = cusolverRfSolve(gH, d_P, d_Q, nrhs, d_T, n, d_X, n); cudaStatus = hipDeviceSynchronize(); t2 = clock(); if ((status != CUSOLVER_STATUS_SUCCESS) || (cudaStatus != hipSuccess)) { printf ("[cusolverRf status %d]\n",status); return TEST_FAILED; } printf("cusolverRfSolve time = %f (s)\n", (t2-t1)/(float)CLOCKS_PER_SEC); // extract the factors (if needed) status = cusolverRfExtractSplitFactorsHost(gH, &tnnzL, &tLp, &tLi, &tLx, &tnnzU, &tUp, &tUi, &tUx); if(status != CUSOLVER_STATUS_SUCCESS){ printf ("[cusolverRf status %d]\n",status); return TEST_FAILED; } /* //print int row, j; printf("printing L\n"); for (row=0; row<n; row++){ for (j=tLp[row]; j<tLp[row+1]; j++){ printf("\%d,\%d,\%f\n",row,tLi[j],tLx[j]); } } printf("printing U\n"); for (row=0; row<n; row++){ for (j=tUp[row]; j<tUp[row+1]; j++){ printf("\%d,\%d,\%f\n",row,tUi[j],tUx[j]); } } */ /* perform any other operations based on the solution */ /* ... */ /* check if done */ /* ... */ /* proceed to solve the next linear system */ // update the coefficient matrix using reset values // (assuming that the new linear system, in other words, // new values are already on the GPU in the array d_rAx) t1 = clock(); status = cusolverRfResetValues(n,nnzA,d_Ap,d_Ai,d_rAx,d_P,d_Q,gH); cudaStatus = hipDeviceSynchronize(); t2 = clock(); if ((status != CUSOLVER_STATUS_SUCCESS) || (cudaStatus != hipSuccess)) { printf ("[cusolverRf status %d]\n",status); return TEST_FAILED; } printf("cusolverRf_reset_values time = %f (s)\n", (t2-t1)/(float)CLOCKS_PER_SEC); } /* free memory and exit */ /* ... */ return TEST_PASSED; }
b74f429e0072674ae97f27b4b150e2088ef0e5d8.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include "cusolverRf.h" #define TEST_PASSED 0 #define TEST_FAILED 1 int main (void){ /* matrix A */ int n; int nnzA; int *Ap=NULL; int *Ai=NULL; double *Ax=NULL; int *d_Ap=NULL; int *d_Ai=NULL; double *d_rAx=NULL; /* matrices L and U */ int nnzL, nnzU; int *Lp=NULL; int *Li=NULL; double* Lx=NULL; int *Up=NULL; int *Ui=NULL; double* Ux=NULL; /* reordering matrices */ int *P=NULL; int *Q=NULL; int * d_P=NULL; int * d_Q=NULL; /* solution and rhs */ int nrhs; //# of rhs for each system (currently only =1 is supported) double *d_X=NULL; double *d_T=NULL; /* cuda */ cudaError_t cudaStatus; /* cuolverRf */ cusolverRfHandle_t gH=NULL; cusolverStatus_t status; /* host sparse direct solver */ /* ... */ /* other variables */ int tnnzL, tnnzU; int *tLp=NULL; int *tLi=NULL; double *tLx=NULL; int *tUp=NULL; int *tUi=NULL; double *tUx=NULL; clock_t t1, t2; /* ASSUMPTION: recall that we are solving a set of linear systems A_{i} x_{i} = f_{i} for i=0,...,k-1 where the sparsity pattern of the coefficient matrices A_{i} as well as the reordering to minimize fill-in and the pivoting used during the LU factorization remain the same. */ /* Step 1: solve the first linear system (i=0) on the host, using host sparse direct solver, which involves full LU factorization and solve. */ /* ... */ /* Step 2: interface to the library by extracting the following information from the first solve: a) triangular factors L and U b) pivoting and reordering permutations P and Q c) also, allocate all the necessary memory */ /* ... */ /* Step 3: use the library to solve subsequent (i=1,...,k-1) linear systems a) the library setup (called only once) */ //create handle status = cusolverRfCreate(&gH); if (status != CUSOLVER_STATUS_SUCCESS){ printf ("[cusolverRf status %d]\n",status); return TEST_FAILED; } //set fast mode status = cusolverRfSetResetValuesFastMode(gH,CUSOLVERRF_RESET_VALUES_FAST_MODE_ON); if (status != CUSOLVER_STATUS_SUCCESS){ printf ("[cusolverRf status %d]\n",status); return TEST_FAILED; } //assemble internal data structures (you should use the coeffcient matrix A //corresponding to the second (i=1) linear system in this call) t1 = clock(); status = cusolverRfSetupHost(n, nnzA, Ap, Ai, Ax, nnzL, Lp, Li, Lx, nnzU, Up, Ui, Ux, P, Q, gH); cudaStatus = cudaDeviceSynchronize(); t2 = clock(); if ((status != CUSOLVER_STATUS_SUCCESS) || (cudaStatus != cudaSuccess)) { printf ("[cusolverRf status %d]\n",status); return TEST_FAILED; } printf("cusolverRfSetupHost time = %f (s)\n", (t2-t1)/(float)CLOCKS_PER_SEC); //analyze available parallelism t1 = clock(); status = cusolverRfAnalyze(gH); cudaStatus = cudaDeviceSynchronize(); t2 = clock(); if ((status != CUSOLVER_STATUS_SUCCESS) || (cudaStatus != cudaSuccess)) { printf ("[cusolverRf status %d]\n",status); return TEST_FAILED; } printf("cusolverRfAnalyze time = %f (s)\n", (t2-t1)/(float)CLOCKS_PER_SEC); /* b) The library subsequent (i=1,...,k-1) LU re-factorization and solve (called multiple times). 
*/ int k = 2; for (int i=1; i<k; i++){ //LU re-factorization t1 = clock(); status = cusolverRfRefactor(gH); cudaStatus = cudaDeviceSynchronize(); t2 = clock(); if ((status != CUSOLVER_STATUS_SUCCESS) || (cudaStatus != cudaSuccess)) { printf ("[cusolverRF status %d]\n",status); return TEST_FAILED; } printf("cuSolverReRefactor time = %f (s)\n", (t2-t1)/(float)CLOCKS_PER_SEC); //forward and backward solve t1 = clock(); status = cusolverRfSolve(gH, d_P, d_Q, nrhs, d_T, n, d_X, n); cudaStatus = cudaDeviceSynchronize(); t2 = clock(); if ((status != CUSOLVER_STATUS_SUCCESS) || (cudaStatus != cudaSuccess)) { printf ("[cusolverRf status %d]\n",status); return TEST_FAILED; } printf("cusolverRfSolve time = %f (s)\n", (t2-t1)/(float)CLOCKS_PER_SEC); // extract the factors (if needed) status = cusolverRfExtractSplitFactorsHost(gH, &tnnzL, &tLp, &tLi, &tLx, &tnnzU, &tUp, &tUi, &tUx); if(status != CUSOLVER_STATUS_SUCCESS){ printf ("[cusolverRf status %d]\n",status); return TEST_FAILED; } /* //print int row, j; printf("printing L\n"); for (row=0; row<n; row++){ for (j=tLp[row]; j<tLp[row+1]; j++){ printf("\%d,\%d,\%f\n",row,tLi[j],tLx[j]); } } printf("printing U\n"); for (row=0; row<n; row++){ for (j=tUp[row]; j<tUp[row+1]; j++){ printf("\%d,\%d,\%f\n",row,tUi[j],tUx[j]); } } */ /* perform any other operations based on the solution */ /* ... */ /* check if done */ /* ... */ /* proceed to solve the next linear system */ // update the coefficient matrix using reset values // (assuming that the new linear system, in other words, // new values are already on the GPU in the array d_rAx) t1 = clock(); status = cusolverRfResetValues(n,nnzA,d_Ap,d_Ai,d_rAx,d_P,d_Q,gH); cudaStatus = cudaDeviceSynchronize(); t2 = clock(); if ((status != CUSOLVER_STATUS_SUCCESS) || (cudaStatus != cudaSuccess)) { printf ("[cusolverRf status %d]\n",status); return TEST_FAILED; } printf("cusolverRf_reset_values time = %f (s)\n", (t2-t1)/(float)CLOCKS_PER_SEC); } /* free memory and exit */ /* ... */ return TEST_PASSED; }
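Aside from the hipify banner, the differences between this pair are mechanical runtime renames: <cuda_runtime.h> becomes <hip/hip_runtime.h>, cudaError_t/cudaSuccess become hipError_t/hipSuccess, and cudaDeviceSynchronize() becomes hipDeviceSynchronize(); the cusolverRf calls are untouched. The sketch below is not part of the dataset row; it is a minimal illustration of the same synchronize-and-check idiom on the HIP side, with the message text chosen purely for illustration.

#include <stdio.h>
#include <hip/hip_runtime.h>

int main(void) {
    /* Mirror of the cudaDeviceSynchronize()/cudaSuccess check used throughout the row above. */
    hipError_t status = hipDeviceSynchronize();
    if (status != hipSuccess) {
        printf("[hip error %d: %s]\n", status, hipGetErrorString(status));
        return 1;
    }
    return 0;
}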
31631f8cca38997a305c3a7f299bd92b36a2b6e8.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "unit_test.h" #include "amg_solver.h" #include <matrix_io.h> #include "test_utils.h" #include "util.h" #include "time.h" #include <determinism_checker.h> #include <solvers/solver.h> #ifdef AMGX_WITH_MPI #include <mpi.h> #endif namespace amgx { template<class TConfig> class UnittestSolverFactory; template<class TConfig> class UnittestSolver: public Solver<TConfig> { public: typedef Matrix<TConfig> MMatrix; typedef Vector<TConfig> VVector; UnittestSolver( AMG_Config &cfg, const std::string &cfg_scope ): is_setup(false), is_init(false), is_finalized(true), Solver<TConfig>( cfg, cfg_scope ) { UnittestSolverFactory<TConfig>::ref_count++; } virtual ~UnittestSolver() { UnittestSolverFactory<TConfig>::ref_count--; if ( !is_finalized ) { FatalError("UnittestSolver has not been finalized before destruction", AMGX_ERR_UNKNOWN); } } virtual void solver_setup(bool reuse_matrix_structure) { is_setup = true; is_init = false; } // What coloring level is needed for the matrix? virtual bool isColoringNeeded() const { return true; } // What reordering level is needed for the matrix? virtual bool getReorderColsByColorDesired() const { return true; } virtual bool getInsertDiagonalDesired() const { return true; } // Does the solver requires the residual vector storage virtual bool is_residual_needed( ) const { return false; } // Initialize the solver before running the iterations. virtual void solve_init( VVector &b, VVector &x, bool xIsZero ) { if ( is_init ) { FatalError("UnittestSolver has already been initialized", AMGX_ERR_UNKNOWN); } if ( !is_setup ) { FatalError("UnittestSolver has not been set up before calling init_solve", AMGX_ERR_UNKNOWN); } is_init = true; is_finalized = false; } // Run a single iteration. Compute the residual and its norm and decide convergence. 
virtual bool solve_iteration( VVector &b, VVector &x, bool xIsZero ) { if ( !is_init ) { FatalError("UnittestSolver has not been initialized before calling solve_iteration", AMGX_ERR_UNKNOWN); } return true; } // Finalize the solver after running the iterations. virtual void solve_finalize( VVector &b, VVector &x ) { if ( is_finalized ) { FatalError("UnittestSolver has already been finalized", AMGX_ERR_UNKNOWN); } if ( !is_init ) { FatalError("UnittestSolver has not been initialized before calling solve_iteration", AMGX_ERR_UNKNOWN); } is_finalized = true; is_init = false; } private: bool is_setup; bool is_init; bool is_finalized; }; template<class T_Config> class UnittestSolverFactory : public SolverFactory<T_Config> { public: Solver<T_Config> *create( AMG_Config &cfg, const std::string &cfg_scope, ThreadManager *tmng ) { return new UnittestSolver<T_Config>( cfg, cfg_scope); } static int ref_count; }; //init ref_count template<class TConfig> int UnittestSolverFactory<TConfig>::ref_count = 0; //instantiate template solver #define AMGX_CASE_LINE(CASE) template class UnittestSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE // parameter is used as test name DECLARE_UNITTEST_BEGIN(PreconditionerUsage); void run() { #ifdef AMGX_WITH_MPI int mpiFlag; MPI_Initialized(&mpiFlag); if ( !mpiFlag ) { int argc = 1; char **argv = NULL; MPI_Init( &argc, &argv); } #endif AMG_Config cfg; cfg.parseParameterString("config_version=2,\ determinism_flag=1,\ max_levels=1,\ max_iters=1,\ use_scalar_norm=1,\ reorder_cols_by_color=1,\ insert_diag_while_reordering=1,\ coloring_level=1,\ preconditioner=UNITTEST_SOLVER"); //read matrices std::string filename = UnitTest::get_configuration().data_folder + "Internal/poisson/poisson9x4x4x4.mtx.bin"; MatrixA A1, A2; Vector<T_Config> x1, b1, x2, b2; Vector_h x_h, b_h; Matrix_h A_h; typedef TemplateConfig<AMGX_host, T_Config::vecPrec, T_Config::matPrec, AMGX_indInt> Config_h; A_h.set_initialized(0); A_h.addProps(CSR); std::string fail_msg = "Cannot open " + filename; this->PrintOnFail(fail_msg.c_str()); UNITTEST_ASSERT_TRUE(MatrixIO<Config_h>::readSystem( filename.c_str(), A_h, b_h, x_h ) == AMGX_OK); //copy to device A1 = A_h; x1 = x_h; b1 = b_h; hipDeviceSynchronize(); cudaCheckError(); b1.set_block_dimx(1); b1.set_block_dimy(A1.get_block_dimy()); x1.set_block_dimx(1); x1.set_block_dimy(A1.get_block_dimx()); x1.resize( b1.size(), 1.0 ); b2.set_block_dimx(1); b2.set_block_dimy(A2.get_block_dimy()); x2.set_block_dimx(1); x2.set_block_dimy(A2.get_block_dimx()); x2.resize( b2.size(), 1.0 ); typename SolverFactory<T_Config>::Iterator iter = SolverFactory<T_Config>::getIterator(); SolverFactory<T_Config>::registerFactory("UNITTEST_SOLVER", new UnittestSolverFactory<T_Config>); MatrixA *A; Vector<T_Config> *x, *b; while (!SolverFactory<T_Config>::isIteratorLast(iter)) { //skip std::string m_name = iter->first.c_str(); if ( m_name.compare( "BLOCK_DILU" ) == 0 || //not implemented on device //m_name.compare( "KACZMARZ" ) == 0 || // breaks with coloring m_name.compare( "FIXCOLOR_GS" ) == 0 ) //deprecated, unusable { iter++; continue; } if ( m_name.compare( "MULTICOLOR_ILU" ) == 0 ) { A = &A2; x = &x2; b = &b2; } else { A = &A1; x = &x1; b = &b1; } Solver<T_Config> *solver = iter->second->create( cfg, "default" ); A->setupMatrix( solver, cfg, false ); solver->setup( *A, false ); solver->solve( *b, *x, false ); solver->solve( *b, *x, false ); delete solver; fail_msg = "preconditioner has not been destroyed by " + iter->first; 
PrintOnFail(fail_msg.c_str()); UNITTEST_ASSERT_TRUE( UnittestSolverFactory<T_Config>::ref_count == 0 ); iter++; } } DECLARE_UNITTEST_END(PreconditionerUsage); // or run for all device configs #define AMGX_CASE_LINE(CASE) PreconditionerUsage <TemplateMode<CASE>::Type> PreconditionerUsage_##CASE; AMGX_FORALL_BUILDS_DEVICE(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } //namespace amgx
31631f8cca38997a305c3a7f299bd92b36a2b6e8.cu
/* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "unit_test.h" #include "amg_solver.h" #include <matrix_io.h> #include "test_utils.h" #include "util.h" #include "time.h" #include <determinism_checker.h> #include <solvers/solver.h> #ifdef AMGX_WITH_MPI #include <mpi.h> #endif namespace amgx { template<class TConfig> class UnittestSolverFactory; template<class TConfig> class UnittestSolver: public Solver<TConfig> { public: typedef Matrix<TConfig> MMatrix; typedef Vector<TConfig> VVector; UnittestSolver( AMG_Config &cfg, const std::string &cfg_scope ): is_setup(false), is_init(false), is_finalized(true), Solver<TConfig>( cfg, cfg_scope ) { UnittestSolverFactory<TConfig>::ref_count++; } virtual ~UnittestSolver() { UnittestSolverFactory<TConfig>::ref_count--; if ( !is_finalized ) { FatalError("UnittestSolver has not been finalized before destruction", AMGX_ERR_UNKNOWN); } } virtual void solver_setup(bool reuse_matrix_structure) { is_setup = true; is_init = false; } // What coloring level is needed for the matrix? virtual bool isColoringNeeded() const { return true; } // What reordering level is needed for the matrix? virtual bool getReorderColsByColorDesired() const { return true; } virtual bool getInsertDiagonalDesired() const { return true; } // Does the solver requires the residual vector storage virtual bool is_residual_needed( ) const { return false; } // Initialize the solver before running the iterations. virtual void solve_init( VVector &b, VVector &x, bool xIsZero ) { if ( is_init ) { FatalError("UnittestSolver has already been initialized", AMGX_ERR_UNKNOWN); } if ( !is_setup ) { FatalError("UnittestSolver has not been set up before calling init_solve", AMGX_ERR_UNKNOWN); } is_init = true; is_finalized = false; } // Run a single iteration. Compute the residual and its norm and decide convergence. 
virtual bool solve_iteration( VVector &b, VVector &x, bool xIsZero ) { if ( !is_init ) { FatalError("UnittestSolver has not been initialized before calling solve_iteration", AMGX_ERR_UNKNOWN); } return true; } // Finalize the solver after running the iterations. virtual void solve_finalize( VVector &b, VVector &x ) { if ( is_finalized ) { FatalError("UnittestSolver has already been finalized", AMGX_ERR_UNKNOWN); } if ( !is_init ) { FatalError("UnittestSolver has not been initialized before calling solve_iteration", AMGX_ERR_UNKNOWN); } is_finalized = true; is_init = false; } private: bool is_setup; bool is_init; bool is_finalized; }; template<class T_Config> class UnittestSolverFactory : public SolverFactory<T_Config> { public: Solver<T_Config> *create( AMG_Config &cfg, const std::string &cfg_scope, ThreadManager *tmng ) { return new UnittestSolver<T_Config>( cfg, cfg_scope); } static int ref_count; }; //init ref_count template<class TConfig> int UnittestSolverFactory<TConfig>::ref_count = 0; //instantiate template solver #define AMGX_CASE_LINE(CASE) template class UnittestSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE // parameter is used as test name DECLARE_UNITTEST_BEGIN(PreconditionerUsage); void run() { #ifdef AMGX_WITH_MPI int mpiFlag; MPI_Initialized(&mpiFlag); if ( !mpiFlag ) { int argc = 1; char **argv = NULL; MPI_Init( &argc, &argv); } #endif AMG_Config cfg; cfg.parseParameterString("config_version=2,\ determinism_flag=1,\ max_levels=1,\ max_iters=1,\ use_scalar_norm=1,\ reorder_cols_by_color=1,\ insert_diag_while_reordering=1,\ coloring_level=1,\ preconditioner=UNITTEST_SOLVER"); //read matrices std::string filename = UnitTest::get_configuration().data_folder + "Internal/poisson/poisson9x4x4x4.mtx.bin"; MatrixA A1, A2; Vector<T_Config> x1, b1, x2, b2; Vector_h x_h, b_h; Matrix_h A_h; typedef TemplateConfig<AMGX_host, T_Config::vecPrec, T_Config::matPrec, AMGX_indInt> Config_h; A_h.set_initialized(0); A_h.addProps(CSR); std::string fail_msg = "Cannot open " + filename; this->PrintOnFail(fail_msg.c_str()); UNITTEST_ASSERT_TRUE(MatrixIO<Config_h>::readSystem( filename.c_str(), A_h, b_h, x_h ) == AMGX_OK); //copy to device A1 = A_h; x1 = x_h; b1 = b_h; cudaDeviceSynchronize(); cudaCheckError(); b1.set_block_dimx(1); b1.set_block_dimy(A1.get_block_dimy()); x1.set_block_dimx(1); x1.set_block_dimy(A1.get_block_dimx()); x1.resize( b1.size(), 1.0 ); b2.set_block_dimx(1); b2.set_block_dimy(A2.get_block_dimy()); x2.set_block_dimx(1); x2.set_block_dimy(A2.get_block_dimx()); x2.resize( b2.size(), 1.0 ); typename SolverFactory<T_Config>::Iterator iter = SolverFactory<T_Config>::getIterator(); SolverFactory<T_Config>::registerFactory("UNITTEST_SOLVER", new UnittestSolverFactory<T_Config>); MatrixA *A; Vector<T_Config> *x, *b; while (!SolverFactory<T_Config>::isIteratorLast(iter)) { //skip std::string m_name = iter->first.c_str(); if ( m_name.compare( "BLOCK_DILU" ) == 0 || //not implemented on device //m_name.compare( "KACZMARZ" ) == 0 || // breaks with coloring m_name.compare( "FIXCOLOR_GS" ) == 0 ) //deprecated, unusable { iter++; continue; } if ( m_name.compare( "MULTICOLOR_ILU" ) == 0 ) { A = &A2; x = &x2; b = &b2; } else { A = &A1; x = &x1; b = &b1; } Solver<T_Config> *solver = iter->second->create( cfg, "default" ); A->setupMatrix( solver, cfg, false ); solver->setup( *A, false ); solver->solve( *b, *x, false ); solver->solve( *b, *x, false ); delete solver; fail_msg = "preconditioner has not been destroyed by " + iter->first; 
PrintOnFail(fail_msg.c_str()); UNITTEST_ASSERT_TRUE( UnittestSolverFactory<T_Config>::ref_count == 0 ); iter++; } } DECLARE_UNITTEST_END(PreconditionerUsage); // or run for all device configs #define AMGX_CASE_LINE(CASE) PreconditionerUsage <TemplateMode<CASE>::Type> PreconditionerUsage_##CASE; AMGX_FORALL_BUILDS_DEVICE(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } //namespace amgx
d287c2526ba45c749a2c92aec80b849610369f48.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2018-2019 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/permutation_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <xgboost/data.h> #include <xgboost/linear_updater.h> #include "xgboost/span.h" #include "coordinate_common.h" #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "./param.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { // NOLINT public: // set training parameter void Configure(Args const& args) override { tparam_.UpdateAllowUnknown(args); coord_param_.UpdateAllowUnknown(args); selector_.reset(FeatureSelector::Create(tparam_.feature_selector, ctx_->Threads())); monitor_.Init("GPUCoordinateUpdater"); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); FromJson(config.at("linear_train_param"), &tparam_); FromJson(config.at("coordinate_param"), &coord_param_); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["linear_train_param"] = ToJson(tparam_); out["coordinate_param"] = ToJson(coord_param_); } void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) { if (ctx_->gpu_id < 0) return; num_row_ = static_cast<size_t>(p_fmat->Info().num_row_); CHECK(p_fmat->SingleColBlock()); SparsePage const& batch = *(p_fmat->GetBatches<CSCPage>().begin()); auto page = batch.GetView(); if (IsEmpty()) { return; } dh::safe_cuda(hipSetDevice(ctx_->gpu_id)); // The begin and end indices for the section of each column associated with // this device std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; // iterate through columns for (size_t fidx = 0; fidx < batch.Size(); fidx++) { common::Span<Entry const> col = page[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(0, 0.0f), cmp); auto column_end = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(num_row_, 0.0f), cmp); column_segments.emplace_back(static_cast<bst_uint>(column_begin - col.cbegin()), static_cast<bst_uint>(column_end - col.cbegin())); row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin)); } data_.resize(row_ptr_.back()); gpair_.resize(num_row_ * model_param.num_output_group); for (size_t fidx = 0; fidx < batch.Size(); fidx++) { auto col = page[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(hipMemcpy( data_.data().get() + row_ptr_[fidx], col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), hipMemcpyHostToDevice)); } } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { tparam_.DenormalizePenalties(sum_instance_weight); monitor_.Start("LazyInitDevice"); this->LazyInitDevice(p_fmat, *(model->learner_model_param)); monitor_.Stop("LazyInitDevice"); monitor_.Start("UpdateGpair"); auto &in_gpair_host = in_gpair->ConstHostVector(); // Update gpair if (ctx_->gpu_id >= 0) { this->UpdateGpair(in_gpair_host); } monitor_.Stop("UpdateGpair"); monitor_.Start("UpdateBias"); this->UpdateBias(model); monitor_.Stop("UpdateBias"); // prepare 
for updating the weights selector_->Setup(*model, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm, coord_param_.top_k); monitor_.Start("UpdateFeature"); for (uint32_t group_idx = 0; group_idx < model->learner_model_param->num_output_group; ++group_idx) { for (auto i = 0U; i < model->learner_model_param->num_feature; i++) { auto fidx = selector_->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm); if (fidx < 0) break; this->UpdateFeature(fidx, group_idx, model); } } monitor_.Stop("UpdateFeature"); } void UpdateBias(gbm::GBLinearModel *model) { for (uint32_t group_idx = 0; group_idx < model->learner_model_param->num_output_group; ++group_idx) { // Get gradient auto grad = GradientPair(0, 0); if (ctx_->gpu_id >= 0) { grad = GetBiasGradient(group_idx, model->learner_model_param->num_output_group); } auto dbias = static_cast<float>( tparam_.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->Bias()[group_idx] += dbias; // Update residual if (ctx_->gpu_id >= 0) { UpdateBiasResidual(dbias, group_idx, model->learner_model_param->num_output_group); } } } void UpdateFeature(int fidx, int group_idx, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = GradientPair(0, 0); if (ctx_->gpu_id >= 0) { grad = GetGradient(group_idx, model->learner_model_param->num_output_group, fidx); } auto dw = static_cast<float>(tparam_.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm)); w += dw; if (ctx_->gpu_id >= 0) { UpdateResidual(dw, group_idx, model->learner_model_param->num_output_group, fidx); } } // This needs to be public because of the __device__ lambda. GradientPair GetBiasGradient(int group_idx, int num_group) { dh::safe_cuda(hipSetDevice(ctx_->gpu_id)); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.data(), skip); return dh::SumReduction(perm, num_row_); } // This needs to be public because of the __device__ lambda. void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = dh::ToSpan(gpair_); dh::LaunchN(num_row_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } // This needs to be public because of the __device__ lambda. GradientPair GetGradient(int group_idx, int num_group, int fidx) { dh::safe_cuda(hipSetDevice(ctx_->gpu_id)); common::Span<xgboost::Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(multiply_iterator, col_size); } // This needs to be public because of the __device__ lambda. 
void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); common::Span<Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } private: bool IsEmpty() { return num_row_ == 0; } void UpdateGpair(const std::vector<GradientPair> &host_gpair) { dh::safe_cuda(hipMemcpyAsync( gpair_.data().get(), host_gpair.data(), gpair_.size() * sizeof(GradientPair), hipMemcpyHostToDevice)); } // training parameter LinearTrainParam tparam_; CoordinateParam coord_param_; std::unique_ptr<FeatureSelector> selector_; common::Monitor monitor_; std::vector<size_t> row_ptr_; dh::device_vector<xgboost::Entry> data_; dh::caching_device_vector<GradientPair> gpair_; size_t num_row_; }; XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
d287c2526ba45c749a2c92aec80b849610369f48.cu
/*! * Copyright 2018-2019 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/permutation_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <xgboost/data.h> #include <xgboost/linear_updater.h> #include "xgboost/span.h" #include "coordinate_common.h" #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "./param.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { // NOLINT public: // set training parameter void Configure(Args const& args) override { tparam_.UpdateAllowUnknown(args); coord_param_.UpdateAllowUnknown(args); selector_.reset(FeatureSelector::Create(tparam_.feature_selector, ctx_->Threads())); monitor_.Init("GPUCoordinateUpdater"); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); FromJson(config.at("linear_train_param"), &tparam_); FromJson(config.at("coordinate_param"), &coord_param_); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["linear_train_param"] = ToJson(tparam_); out["coordinate_param"] = ToJson(coord_param_); } void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) { if (ctx_->gpu_id < 0) return; num_row_ = static_cast<size_t>(p_fmat->Info().num_row_); CHECK(p_fmat->SingleColBlock()); SparsePage const& batch = *(p_fmat->GetBatches<CSCPage>().begin()); auto page = batch.GetView(); if (IsEmpty()) { return; } dh::safe_cuda(cudaSetDevice(ctx_->gpu_id)); // The begin and end indices for the section of each column associated with // this device std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; // iterate through columns for (size_t fidx = 0; fidx < batch.Size(); fidx++) { common::Span<Entry const> col = page[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(0, 0.0f), cmp); auto column_end = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(num_row_, 0.0f), cmp); column_segments.emplace_back(static_cast<bst_uint>(column_begin - col.cbegin()), static_cast<bst_uint>(column_end - col.cbegin())); row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin)); } data_.resize(row_ptr_.back()); gpair_.resize(num_row_ * model_param.num_output_group); for (size_t fidx = 0; fidx < batch.Size(); fidx++) { auto col = page[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(cudaMemcpy( data_.data().get() + row_ptr_[fidx], col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), cudaMemcpyHostToDevice)); } } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { tparam_.DenormalizePenalties(sum_instance_weight); monitor_.Start("LazyInitDevice"); this->LazyInitDevice(p_fmat, *(model->learner_model_param)); monitor_.Stop("LazyInitDevice"); monitor_.Start("UpdateGpair"); auto &in_gpair_host = in_gpair->ConstHostVector(); // Update gpair if (ctx_->gpu_id >= 0) { this->UpdateGpair(in_gpair_host); } monitor_.Stop("UpdateGpair"); monitor_.Start("UpdateBias"); this->UpdateBias(model); monitor_.Stop("UpdateBias"); // prepare for updating the weights selector_->Setup(*model, 
in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm, coord_param_.top_k); monitor_.Start("UpdateFeature"); for (uint32_t group_idx = 0; group_idx < model->learner_model_param->num_output_group; ++group_idx) { for (auto i = 0U; i < model->learner_model_param->num_feature; i++) { auto fidx = selector_->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm); if (fidx < 0) break; this->UpdateFeature(fidx, group_idx, model); } } monitor_.Stop("UpdateFeature"); } void UpdateBias(gbm::GBLinearModel *model) { for (uint32_t group_idx = 0; group_idx < model->learner_model_param->num_output_group; ++group_idx) { // Get gradient auto grad = GradientPair(0, 0); if (ctx_->gpu_id >= 0) { grad = GetBiasGradient(group_idx, model->learner_model_param->num_output_group); } auto dbias = static_cast<float>( tparam_.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->Bias()[group_idx] += dbias; // Update residual if (ctx_->gpu_id >= 0) { UpdateBiasResidual(dbias, group_idx, model->learner_model_param->num_output_group); } } } void UpdateFeature(int fidx, int group_idx, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = GradientPair(0, 0); if (ctx_->gpu_id >= 0) { grad = GetGradient(group_idx, model->learner_model_param->num_output_group, fidx); } auto dw = static_cast<float>(tparam_.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm)); w += dw; if (ctx_->gpu_id >= 0) { UpdateResidual(dw, group_idx, model->learner_model_param->num_output_group, fidx); } } // This needs to be public because of the __device__ lambda. GradientPair GetBiasGradient(int group_idx, int num_group) { dh::safe_cuda(cudaSetDevice(ctx_->gpu_id)); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.data(), skip); return dh::SumReduction(perm, num_row_); } // This needs to be public because of the __device__ lambda. void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = dh::ToSpan(gpair_); dh::LaunchN(num_row_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } // This needs to be public because of the __device__ lambda. GradientPair GetGradient(int group_idx, int num_group, int fidx) { dh::safe_cuda(cudaSetDevice(ctx_->gpu_id)); common::Span<xgboost::Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(multiply_iterator, col_size); } // This needs to be public because of the __device__ lambda. 
void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); common::Span<Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } private: bool IsEmpty() { return num_row_ == 0; } void UpdateGpair(const std::vector<GradientPair> &host_gpair) { dh::safe_cuda(cudaMemcpyAsync( gpair_.data().get(), host_gpair.data(), gpair_.size() * sizeof(GradientPair), cudaMemcpyHostToDevice)); } // training parameter LinearTrainParam tparam_; CoordinateParam coord_param_; std::unique_ptr<FeatureSelector> selector_; common::Monitor monitor_; std::vector<size_t> row_ptr_; dh::device_vector<xgboost::Entry> data_; dh::caching_device_vector<GradientPair> gpair_; size_t num_row_; }; XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
0db03b9e734fe24cd79e7349bfc5e9979a67f917.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <helpers/PointersManager.h> #include <ops/declarable/helpers/flatten.h> #include "execution/cuda/LaunchDims.h" namespace sd { namespace ops { namespace helpers { template <typename T> static void SD_KERNEL flattenKernel(void **xBuffers, sd::LongType **xShapeInfos, sd::LongType *offsets, sd::LongType numInputs, void *zBuffer, const sd::LongType *zShapeInfo, char order) { int xCoord[SD_MAX_RANK]; // each block of threads works on 1 input array for (sd::LongType e = blockIdx.x; e < numInputs; e += gridDim.x) { auto z = reinterpret_cast<T *>(zBuffer) + offsets[e]; auto xBuffer = reinterpret_cast<T *>(xBuffers[e]); auto xShapeInfo = xShapeInfos[e]; auto xLength = shape::length(xShapeInfo); // each element of this input array has own place within common output array for (sd::LongType i = threadIdx.x; i < xLength; i += blockDim.x) z[i] = xBuffer[getIndexOffsetOrdered(i, xShapeInfo, order)]; } } template <typename T> static void flatten_(sd::LaunchContext *context, std::vector<NDArray *> &inputs, NDArray *output, char order) { PointersManager pm(context, "flatten"); std::vector<const void *> hdBuffers(inputs.size()); std::vector<sd::LongType> hOffsets(inputs.size()); std::vector<const sd::LongType *> hdShapes(inputs.size()); sd::LongType cOffset = 0; // calculating offsets in output for (int e = 0; e < inputs.size(); e++) { hOffsets[e] = cOffset; cOffset += inputs[e]->lengthOf(); hdBuffers[e] = inputs[e]->specialBuffer(); hdShapes[e] = inputs[e]->specialShapeInfo(); } // copying pointers to device auto dBuffers = (void **)pm.replicatePointer(hdBuffers.data(), inputs.size() * sizeof(void *)); auto dShapes = (sd::LongType **)pm.replicatePointer(hdShapes.data(), inputs.size() * sizeof(sd::LongType *)); auto dOffsets = (sd::LongType *)pm.replicatePointer(hOffsets.data(), inputs.size() * sizeof(sd::LongType)); dim3 launchDims = getLaunchDims("flatten"); hipLaunchKernelGGL(( flattenKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *context->getCudaStream(), dBuffers, dShapes, dOffsets, inputs.size(), output->specialBuffer(), output->specialShapeInfo(), order); pm.synchronize(); } void flatten(sd::LaunchContext *context, std::vector<NDArray *> &inputs, NDArray *output, char order) { // FIXME: we want NDArrayFactory::prepareSpecialUse here eventually const std::vector<const NDArray *> v(inputs.begin(), inputs.end()); //prepareSpecialUse requires const NDArray::prepareSpecialUse({output}, v, {}); BUILD_SINGLE_SELECTOR(output->dataType(), flatten_, (context, inputs, output, order), SD_COMMON_TYPES); NDArray::registerSpecialUse({output}, {}); } } // namespace 
helpers } // namespace ops } // namespace sd
0db03b9e734fe24cd79e7349bfc5e9979a67f917.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <helpers/PointersManager.h> #include <ops/declarable/helpers/flatten.h> #include "execution/cuda/LaunchDims.h" namespace sd { namespace ops { namespace helpers { template <typename T> static void SD_KERNEL flattenKernel(void **xBuffers, sd::LongType **xShapeInfos, sd::LongType *offsets, sd::LongType numInputs, void *zBuffer, const sd::LongType *zShapeInfo, char order) { int xCoord[SD_MAX_RANK]; // each block of threads works on 1 input array for (sd::LongType e = blockIdx.x; e < numInputs; e += gridDim.x) { auto z = reinterpret_cast<T *>(zBuffer) + offsets[e]; auto xBuffer = reinterpret_cast<T *>(xBuffers[e]); auto xShapeInfo = xShapeInfos[e]; auto xLength = shape::length(xShapeInfo); // each element of this input array has own place within common output array for (sd::LongType i = threadIdx.x; i < xLength; i += blockDim.x) z[i] = xBuffer[getIndexOffsetOrdered(i, xShapeInfo, order)]; } } template <typename T> static void flatten_(sd::LaunchContext *context, std::vector<NDArray *> &inputs, NDArray *output, char order) { PointersManager pm(context, "flatten"); std::vector<const void *> hdBuffers(inputs.size()); std::vector<sd::LongType> hOffsets(inputs.size()); std::vector<const sd::LongType *> hdShapes(inputs.size()); sd::LongType cOffset = 0; // calculating offsets in output for (int e = 0; e < inputs.size(); e++) { hOffsets[e] = cOffset; cOffset += inputs[e]->lengthOf(); hdBuffers[e] = inputs[e]->specialBuffer(); hdShapes[e] = inputs[e]->specialShapeInfo(); } // copying pointers to device auto dBuffers = (void **)pm.replicatePointer(hdBuffers.data(), inputs.size() * sizeof(void *)); auto dShapes = (sd::LongType **)pm.replicatePointer(hdShapes.data(), inputs.size() * sizeof(sd::LongType *)); auto dOffsets = (sd::LongType *)pm.replicatePointer(hOffsets.data(), inputs.size() * sizeof(sd::LongType)); dim3 launchDims = getLaunchDims("flatten"); flattenKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *context->getCudaStream()>>>( dBuffers, dShapes, dOffsets, inputs.size(), output->specialBuffer(), output->specialShapeInfo(), order); pm.synchronize(); } void flatten(sd::LaunchContext *context, std::vector<NDArray *> &inputs, NDArray *output, char order) { // FIXME: we want NDArrayFactory::prepareSpecialUse here eventually const std::vector<const NDArray *> v(inputs.begin(), inputs.end()); //prepareSpecialUse requires const NDArray::prepareSpecialUse({output}, v, {}); BUILD_SINGLE_SELECTOR(output->dataType(), flatten_, (context, inputs, output, order), SD_COMMON_TYPES); NDArray::registerSpecialUse({output}, {}); } } // namespace helpers } // namespace ops } // namespace sd
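Beyond the prepended header, the substantive rewrite in this pair is the kernel launch: the CUDA triple-chevron form flattenKernel<T><<<launchDims.x, launchDims.y, launchDims.z, stream>>>(...) becomes hipLaunchKernelGGL((flattenKernel<T>), grid, block, sharedMemBytes, stream, args...). The following is a minimal, self-contained sketch of that mapping; the kernel, buffer, and launch dimensions are invented for illustration and are not taken from the row.

#include <hip/hip_runtime.h>

__global__ void scaleKernel(float *data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main(void) {
    const int n = 256;
    float *d_data = nullptr;
    hipMalloc((void **)&d_data, n * sizeof(float));

    /* CUDA form: scaleKernel<<<dim3(1), dim3(256), 0, 0>>>(d_data, 2.0f, n);
       HIP form emitted by hipify: grid, block, dynamic shared memory bytes, stream, then kernel args. */
    hipLaunchKernelGGL(scaleKernel, dim3(1), dim3(256), 0, 0, d_data, 2.0f, n);

    hipDeviceSynchronize();
    hipFree(d_data);
    return 0;
}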
a2c5f1397969b6084eef610f3fe06dbbd9e15d49.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "jacobi_iteration.h" /* FIXME: Write the device kernels to solve the Jacobi iterations */ __global__ void jacobi_iteration_kernel_naive(float *A, float *x, float *new_x,float *B, double *ssd, long num_threads) { /* Obtain thread index within the thread block */ int threadX = threadIdx.x; int threadY = threadIdx.y; /* Obtain block index within the grid */ int blockX = blockIdx.x; int blockY = blockIdx.y; /* Find position in matrix */ int column_number = blockDim.x * blockX + threadX; int row_number = blockDim.y * blockY + threadY; int stride = num_threads; int thread_id = row_number * blockDim.x + column_number; // printf("%d %d %d %d %d\n", thread_id, blockDim.x, blockDim.y, row_number, column_number); int i, j; for (i = thread_id; i < MATRIX_SIZE; i += stride) { double sum = -A[i * MATRIX_SIZE + i] * x[i]; for (j = 0; j < MATRIX_SIZE; j++) { sum += A[i * MATRIX_SIZE + j] * x[j]; } /* Update values for the unkowns for the current row. */ new_x[i] = (B[i] - sum)/A[i * MATRIX_SIZE + i]; } __syncthreads (); /* Wait for all threads in thread block to finish */ for (i = thread_id; i < MATRIX_SIZE; i += stride) { // *ssd += (new_x[i] - x[i]) * (new_x[i] - x[i]); atomicAdd(ssd, (new_x[i] - x[i]) * (new_x[i] - x[i])); x[i] = new_x[i]; } return; } __global__ void jacobi_iteration_kernel_optimized(float *A, float *x, float *new_x,float *B, double *ssd, long num_threads) { __shared__ float ssdSub[MATRIX_SIZE]; /* Obtain thread index within the thread block */ int threadX = threadIdx.x; int threadY = threadIdx.y; /* Obtain block index within the grid */ int blockX = blockIdx.x; int blockY = blockIdx.y; /* Find position in matrix */ int column_number = blockDim.x * blockX + threadX; int row_number = blockDim.y * blockY + threadY; int stride = num_threads; int thread_id = row_number * blockDim.x + column_number; // printf("%d %d %d %d %d\n", thread_id, blockDim.x, blockDim.y, row_number, column_number); int i, j; for (i = thread_id; i < MATRIX_SIZE; i += stride) { double sum = -A[i * MATRIX_SIZE + i] * x[i]; for (j = 0; j < MATRIX_SIZE; j++) { sum += A[i + j * MATRIX_SIZE ] * x[j]; } /* Update values for the unkowns for the current row. */ new_x[i] = (B[i] - sum)/A[i * MATRIX_SIZE + i]; } __syncthreads (); for (i = thread_id; i < MATRIX_SIZE; i += stride) { ssdSub[i] = (new_x[i] - x[i]) * (new_x[i] - x[i]); } for (i = thread_id; i < MATRIX_SIZE; i += stride) { atomicAdd(ssd, ssdSub[i]); } return; }
a2c5f1397969b6084eef610f3fe06dbbd9e15d49.cu
#include "jacobi_iteration.h" /* FIXME: Write the device kernels to solve the Jacobi iterations */ __global__ void jacobi_iteration_kernel_naive(float *A, float *x, float *new_x,float *B, double *ssd, long num_threads) { /* Obtain thread index within the thread block */ int threadX = threadIdx.x; int threadY = threadIdx.y; /* Obtain block index within the grid */ int blockX = blockIdx.x; int blockY = blockIdx.y; /* Find position in matrix */ int column_number = blockDim.x * blockX + threadX; int row_number = blockDim.y * blockY + threadY; int stride = num_threads; int thread_id = row_number * blockDim.x + column_number; // printf("%d %d %d %d %d\n", thread_id, blockDim.x, blockDim.y, row_number, column_number); int i, j; for (i = thread_id; i < MATRIX_SIZE; i += stride) { double sum = -A[i * MATRIX_SIZE + i] * x[i]; for (j = 0; j < MATRIX_SIZE; j++) { sum += A[i * MATRIX_SIZE + j] * x[j]; } /* Update values for the unkowns for the current row. */ new_x[i] = (B[i] - sum)/A[i * MATRIX_SIZE + i]; } __syncthreads (); /* Wait for all threads in thread block to finish */ for (i = thread_id; i < MATRIX_SIZE; i += stride) { // *ssd += (new_x[i] - x[i]) * (new_x[i] - x[i]); atomicAdd(ssd, (new_x[i] - x[i]) * (new_x[i] - x[i])); x[i] = new_x[i]; } return; } __global__ void jacobi_iteration_kernel_optimized(float *A, float *x, float *new_x,float *B, double *ssd, long num_threads) { __shared__ float ssdSub[MATRIX_SIZE]; /* Obtain thread index within the thread block */ int threadX = threadIdx.x; int threadY = threadIdx.y; /* Obtain block index within the grid */ int blockX = blockIdx.x; int blockY = blockIdx.y; /* Find position in matrix */ int column_number = blockDim.x * blockX + threadX; int row_number = blockDim.y * blockY + threadY; int stride = num_threads; int thread_id = row_number * blockDim.x + column_number; // printf("%d %d %d %d %d\n", thread_id, blockDim.x, blockDim.y, row_number, column_number); int i, j; for (i = thread_id; i < MATRIX_SIZE; i += stride) { double sum = -A[i * MATRIX_SIZE + i] * x[i]; for (j = 0; j < MATRIX_SIZE; j++) { sum += A[i + j * MATRIX_SIZE ] * x[j]; } /* Update values for the unkowns for the current row. */ new_x[i] = (B[i] - sum)/A[i * MATRIX_SIZE + i]; } __syncthreads (); for (i = thread_id; i < MATRIX_SIZE; i += stride) { ssdSub[i] = (new_x[i] - x[i]) * (new_x[i] - x[i]); } for (i = thread_id; i < MATRIX_SIZE; i += stride) { atomicAdd(ssd, ssdSub[i]); } return; }
55eeca0bdb29a87186d941c37adc5b528217e64b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ static void timedReduction(const float *input, float *output, clock_t *timer) { // __shared__ float shared[2 * blockDim.x]; extern __shared__ float shared[]; const int tid = threadIdx.x; const int bid = blockIdx.x; if (tid == 0) timer[bid] = clock(); // Copy input. shared[tid] = input[tid]; shared[tid + blockDim.x] = input[tid + blockDim.x]; // Perform reduction to find minimum. for (int d = blockDim.x; d > 0; d /= 2) { __syncthreads(); if (tid < d) { float f0 = shared[tid]; float f1 = shared[tid + d]; if (f1 < f0) { shared[tid] = f1; } } } // Write result. if (tid == 0) output[bid] = shared[0]; __syncthreads(); if (tid == 0) timer[bid+gridDim.x] = clock(); }
55eeca0bdb29a87186d941c37adc5b528217e64b.cu
#include "includes.h" __global__ static void timedReduction(const float *input, float *output, clock_t *timer) { // __shared__ float shared[2 * blockDim.x]; extern __shared__ float shared[]; const int tid = threadIdx.x; const int bid = blockIdx.x; if (tid == 0) timer[bid] = clock(); // Copy input. shared[tid] = input[tid]; shared[tid + blockDim.x] = input[tid + blockDim.x]; // Perform reduction to find minimum. for (int d = blockDim.x; d > 0; d /= 2) { __syncthreads(); if (tid < d) { float f0 = shared[tid]; float f1 = shared[tid + d]; if (f1 < f0) { shared[tid] = f1; } } } // Write result. if (tid == 0) output[bid] = shared[0]; __syncthreads(); if (tid == 0) timer[bid+gridDim.x] = clock(); }
0e3f389a2611c1660bcbf5e4c047c3f32b38273b.hip
// !!! This is a file automatically generated by hipify!!! #include "Core/Tensor.h" using namespace EDX; using namespace DeepLearning; using namespace Algorithm; void TestCUDA() { { Tensorf A = { 1,2,3,4,5,8,3,1,4 }; Tensorf B = NestedInitializerList<float, 2>({ {1},{2},{3},{4},{5} }); Tensorf C = A + B + A + A * B; float pHostC[45] = { 0 }; hipMemcpy((void*)pHostC, (void*)C.Data(), C.LinearSize() * sizeof(float), hipMemcpyDeviceToHost); } { Tensorf A = { 9,2,3,4,5 }; Tensorf B = { 9,2,3,4,5 }; A *= Tensorf::Exp(B); float pHostA[5] = { 0 }; hipMemcpy((void*)pHostA, (void*)A.Data(), A.LinearSize() * sizeof(float), hipMemcpyDeviceToHost); } { Tensorf A = Tensorf::LinSpace(0, 40960, 40960); Tensorf sum = Tensorf::StandardDeviation(A); float pHost[1] = { 0 }; hipMemcpy((void*)pHost, (void*)sum.Data(), sum.LinearSize() * sizeof(float), hipMemcpyDeviceToHost); } { Tensorf A = Tensorf::ArrayRange(0, 1000); A.Reshape(5, 10, 4, 5); Tensorf sum = Tensorf::Sum(A, { 0, 2 }, true); float pHost[50] = { 0 }; hipMemcpy((void*)pHost, (void*)sum.Data(), sum.LinearSize() * sizeof(float), hipMemcpyDeviceToHost); } { Tensorf A = { {9,2,3,4,5} }; Tensorf B = NestedInitializerList<float, 2>({ {9},{2},{3},{4},{5} }); Tensorf C = Tensorf::Dot(B, A); float pHost[25] = { 0 }; hipMemcpy((void*)pHost, (void*)C.Data(), C.LinearSize() * sizeof(float), hipMemcpyDeviceToHost); } }
0e3f389a2611c1660bcbf5e4c047c3f32b38273b.cu
#include "Core/Tensor.h" using namespace EDX; using namespace DeepLearning; using namespace Algorithm; void TestCUDA() { { Tensorf A = { 1,2,3,4,5,8,3,1,4 }; Tensorf B = NestedInitializerList<float, 2>({ {1},{2},{3},{4},{5} }); Tensorf C = A + B + A + A * B; float pHostC[45] = { 0 }; cudaMemcpy((void*)pHostC, (void*)C.Data(), C.LinearSize() * sizeof(float), cudaMemcpyDeviceToHost); } { Tensorf A = { 9,2,3,4,5 }; Tensorf B = { 9,2,3,4,5 }; A *= Tensorf::Exp(B); float pHostA[5] = { 0 }; cudaMemcpy((void*)pHostA, (void*)A.Data(), A.LinearSize() * sizeof(float), cudaMemcpyDeviceToHost); } { Tensorf A = Tensorf::LinSpace(0, 40960, 40960); Tensorf sum = Tensorf::StandardDeviation(A); float pHost[1] = { 0 }; cudaMemcpy((void*)pHost, (void*)sum.Data(), sum.LinearSize() * sizeof(float), cudaMemcpyDeviceToHost); } { Tensorf A = Tensorf::ArrayRange(0, 1000); A.Reshape(5, 10, 4, 5); Tensorf sum = Tensorf::Sum(A, { 0, 2 }, true); float pHost[50] = { 0 }; cudaMemcpy((void*)pHost, (void*)sum.Data(), sum.LinearSize() * sizeof(float), cudaMemcpyDeviceToHost); } { Tensorf A = { {9,2,3,4,5} }; Tensorf B = NestedInitializerList<float, 2>({ {9},{2},{3},{4},{5} }); Tensorf C = Tensorf::Dot(B, A); float pHost[25] = { 0 }; cudaMemcpy((void*)pHost, (void*)C.Data(), C.LinearSize() * sizeof(float), cudaMemcpyDeviceToHost); } }
fd32ed6c473e7b8c5ab763f36ddbc0ed760808d4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Author: Cao Thanh Tung, Qi Meng Date: 15/03/2011 File Name: cudaMissing.cu This file include all CUDA code to perform the inserting missing sites step =============================================================================== Copyright (c) 2011, School of Computing, National University of Singapore. All rights reserved. Project homepage: http://www.comp.nus.edu.sg/~tants/cdt.html If you use GPU-DT and you like it or have comments on its usefulness etc., we would love to hear from you at <[email protected]>. You may share with us your experience and any possibilities that we may improve the work/code. =============================================================================== Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the National University of University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission from the National University of Singapore. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma warning(disable: 4311 4312) #include <hip/device_functions.h> #include <stdio.h> #include <string.h> #include "../gpudt.h" #include "cudaDecl.h" #include "common.h" #include "cudaCCW.h" #define MAXINT 2147483647 /*********************************************************** * Declarations ***********************************************************/ #define WBLOCK 256 #define INSERT_TRIANGLE(v0, v1, v2, tri) \ tmp = tri; \ cnewtri[tmp] = step; \ ctriangles[tmp * 9 + 3] = v2; \ ctriangles[tmp * 9 + 4] = v0; \ ctriangles[tmp * 9 + 5] = v1; \ ctriangles[tmp * 9 + 6] = atomicExch(&cvertarr[v0], (tmp << 2)); \ ctriangles[tmp * 9 + 7] = atomicExch(&cvertarr[v1], (tmp << 2) | 1); \ ctriangles[tmp * 9 + 8] = atomicExch(&cvertarr[v2], (tmp << 2) | 2); \ #define SET_TRIANGLE(vOrg, vDest, vApex, nOrg, nDest, nApex, tri, ori) \ ctriangles[(tri) * 9 + 3 + ((ori) + 1) % 3] = (vOrg); \ ctriangles[(tri) * 9 + 3 + ((ori) + 2) % 3] = (vDest); \ ctriangles[(tri) * 9 + 3 + (ori)] = (vApex); \ ctriangles[(tri) * 9 + 6 + (ori)] = (nOrg); \ ctriangles[(tri) * 9 + 6 + ((ori) + 1) % 3] = (nDest); \ ctriangles[(tri) * 9 + 6 + ((ori) + 2) % 3] = (nApex) #define UPDATE_TEMP_LINK(pTriOri, pNext) \ if ((pTriOri) >= 0) \ ctriangles[decode_tri(pTriOri) * 9 + 6 + decode_ori(pTriOri)] = -(pNext) #define UPDATE_LINK(pTriOri, pNext) \ if ((pTriOri) >= 0) \ ctriangles[decode_tri(pTriOri) * 9 + decode_ori(pTriOri)] = (pNext) /************************************************************** * Exported methods **************************************************************/ extern "C" void cudaMissing(); /************************************************************** * Definitions **************************************************************/ // Decode an oriented triangle. // An oriented triangle consists of 32 bits. // - 30 highest bits represent the triangle index, // - 2 lowest bits represent the orientation (the starting vertex, 0, 1 or 2) #define decode_tri(x) ((x) >> 2) #define decode_ori(x) ((x) & 3) #define encode_tri(tri, ori) (((tri) << 2) | (ori)) #define MAX(x, y) ((x) < (y) ? (y) : (x)) /************************************************************ * Variables and functions shared with the main module ************************************************************/ extern int nTris, nVerts, nPoints,nConstraints; extern int *ctriangles; extern int *cvertarr; extern int *tvertices; extern REAL2 *covertices; extern short *cnewtri; extern int step; extern int *cflag; /******************************************************************* * Fill an array with increasing numbers *******************************************************************/ __global__ void kernelFillIncrement(int *list, int start, int length) { int x = blockIdx.x * blockDim.x + threadIdx.x; int noThreads = blockDim.x * gridDim.x; for (; x < length; x += noThreads) list[x] = start + x; } /******************************************************************** * Collect all dead triangles into a list. ********************************************************************/ __global__ void kernelMarkDeadTriangles(int *cmarker, short *cnewtri, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris) return ; cmarker[x] = (cnewtri[x] >= 0 ? 
0 : 1); } __global__ void kernelCollectDeadTriangles(int *cdeadTri, short *cnewtri, int *cmarker, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] >= 0) return ; int id = cmarker[x]; cdeadTri[id] = x; } /******************************************************************** * Locate the triangle which we are gonna insert a missing site to. * - If the anchor of the missing site is not yet inserted, skip * - Locate the triangle and mark it to avoid two insertions into * the same triangle. * - Guarantee that the missing site is not on the boundary due to * huge fake boundary added. ********************************************************************/ __global__ void kernelLocateTriangleContainer(int *ctriangles, int *cvertarr, int *tvertices, int *clocation, int *ctags, REAL2 *covertices, int nVerts, int *active, int noActive) { int t = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if ( t >= noActive ) return ; int x = active[t]; int anchor = tvertices[x]; if (cvertarr[anchor] < 0) { clocation[x] = MAXINT; return ; } REAL2 v = covertices[x]; REAL2 vanchor = covertices[anchor]; REAL ccDest, ccApex, ccOpposite; int pNextTri, pTri, pOri; pNextTri = cvertarr[anchor]; pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); int pDest, pApex, pOrg, pTmp; REAL2 vDest, vApex, vOrg, vTmp; pDest = ctriangles[pTri * 9 + 3 + (pOri + 2) % 3]; // Dest vDest = covertices[pDest]; ccDest = cuda_ccw(vanchor, vDest, v); do { pApex = ctriangles[pTri * 9 + 3 + pOri]; // apex vApex = covertices[pApex]; ccApex = cuda_ccw(vanchor, vApex, v); if (ccDest >= 0.0 && ccApex <= 0.0) // Inside the angle break; pDest = pApex; vDest = vApex; ccDest = ccApex; pNextTri = ctriangles[pTri * 9 + (pOri + 2) % 3]; pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); } while (true); // Found an angle, now look for the actual triangle // containing me. ccOpposite = cuda_ccw(vDest, vApex, v); if (ccOpposite < 0.0) { // It's not right here, need to walk a bit further while (true) { // Get the opposite triangle pNextTri = ctriangles[pTri * 9 + (pOri + 1) % 3]; //if (pNextTri < 0) { // cvertarr[x] = -100; // clocation[x] = encode_tri(pTri, (pOri + 1) % 3); // return ; //} pTri = decode_tri(pNextTri); // Rotate the triangle so that the org is opposite the previous org pOri = (decode_ori(pNextTri) + 2) % 3; pOrg = ctriangles[pTri * 9 + 3 + (pOri + 1) % 3]; vOrg = covertices[pOrg]; pTmp = pDest; pDest = pApex; pApex = pTmp; vTmp = vDest; vDest = vApex; vApex = vTmp; ccDest = cuda_ccw(vOrg, vDest, v); ccApex = cuda_ccw(vApex, vOrg, v); bool moveleft; if (ccDest >= 0.0) if (ccApex >= 0.0) // Found it! break; else moveleft = false; else if (ccApex >= 0.0) moveleft = true; else moveleft = (vOrg.x - v.x) * (vApex.x - vDest.x) + (vOrg.y - v.y) * (vApex.y - vDest.y) > 0.0; if (moveleft) { pOri = (pOri + 2) % 3; pApex = pDest; pDest = pOrg; vApex = vDest; vDest = vOrg; ccOpposite = ccDest; // Orientation is unimportant } else { pOri = (pOri + 1) % 3; pDest = pApex; pApex = pOrg; vDest = vApex; vApex = vOrg; ccOpposite = ccApex; } } } int c0 = 0; if (ccDest == 0.0) c0++; if (ccApex == 0.0) c0++; if (ccOpposite == 0.0) c0++; if (c0 == 0) { // Easiest case, it's right here! clocation[x] = pNextTri + 1; // Mark to indicate that it's a simple case. atomicMin(&ctags[pTri], x); } else if (c0 > 1) { // Duplicate point clocation[x] = pNextTri + 1; cvertarr[x] = -2; active[t] = -1; return ; } else { // On an edge. 
// Make sure our 'location' triangle always face toward that edge // (i.e. that edge will be opposite to the origin) if (ccDest == 0.0) pOri = (pOri + 2) % 3; else if (ccApex == 0.0) pOri = (pOri + 1) % 3; clocation[x] = -encode_tri(pTri, pOri) - 1; // To avoid deadlock when 3 sites want to insert on 3 edges, // and they try to mark 3 pairs triangles: (a, b), (b, c), (c, a) atomicMin(&ctags[pTri], x); atomicMin(&ctags[decode_tri(ctriangles[pTri * 9 + (pOri + 1) % 3])], x); } } /*************************************************************************** * Determine which missing point insertion can be take place, * mark those triangles that need to be deleted. ***************************************************************************/ __global__ void kernelPreprocessTriangles(int *ctriangles, int *cvertarr, int *clocation, int *ctags, int *tvertices, short *cnewtri, int *cmarker, BYTE *caffected, int nVerts, int step, int *active, int noActive) { int t = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if ( t >= noActive ) return ; int x = active[t]; int pNextTri = clocation[x]; if (pNextTri == MAXINT) return ; int pTri = decode_tri(abs(pNextTri) - 1); int pOri = decode_ori(abs(pNextTri) - 1); int popp, pOppTri, pOppOri; bool success; if (pNextTri >= 0) // one triangle success = (ctags[pTri] == x); else { popp = (ctriangles[pTri * 9 + (pOri + 1) % 3]); pOppTri = decode_tri(popp); pOppOri = decode_ori(popp); success = (ctags[pTri] == x && ctags[pOppTri] == x); } if (success) { cmarker[x] = 2; cnewtri[pTri] = -step; caffected[ctriangles[pTri * 9 + 3]] = 1; caffected[ctriangles[pTri * 9 + 4]] = 1; caffected[ctriangles[pTri * 9 + 5]] = 1; if (pNextTri < 0) { cnewtri[pOppTri] = -step; caffected[ctriangles[pOppTri * 9 + 3 + pOppOri]] = 1; } } } /************************************************************ * Fix the vertex array for those affected sites ************************************************************/ __global__ void kernelFixVertexArrayMissing(int *ctriangles, int *cvertarr, BYTE *caffected, short *cnewtri, int nVerts) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nVerts || caffected[x] != 1) return ; int p = cvertarr[x], pnext = p; // Find the first valid triangle while (pnext >= 0 && cnewtri[decode_tri(pnext)] < 0) pnext = ctriangles[decode_tri(pnext) * 9 + 6 + decode_ori(pnext)]; if (pnext != p) cvertarr[x] = pnext; while (pnext >= 0) { // Find an invalid triangle do { p = pnext; pnext = ctriangles[decode_tri(p) * 9 + 6 + decode_ori(p)]; } while (pnext >= 0 && cnewtri[decode_tri(pnext)] >= 0); if (pnext >= 0) { // Now pnext is deleted, so we fix the link for p. 
// Find the next valid triangle while (pnext >= 0 && cnewtri[decode_tri(pnext)] < 0) pnext = ctriangles[decode_tri(pnext) * 9 + 6 + decode_ori(pnext)]; ctriangles[decode_tri(p) * 9 + 6 + decode_ori(p)] = pnext; } } } __global__ void kernelInsertMissingSites(int *ctriangles, int *cvertarr, int *clocation, int *cmarker, int *cavailtri, int *cprefix, short *cnewtri, int nVerts, int step, int *active, int noActive) { int t = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if ( t >= noActive ) return; int x = active[t]; if (cmarker[x] != 2) return ; active[t] = -1; int pNextTri = clocation[x]; int pTri = decode_tri(abs(pNextTri) - 1); int pOri = decode_ori(abs(pNextTri) - 1); int pOrg, pDest, pApex, pOpposite; int t1, t2, t3, t4, tmp; int offset = cprefix[x]; t1 = pTri; t2 = cavailtri[offset]; t3 = cavailtri[offset + 1]; pApex = ctriangles[pTri * 9 + 3 + pOri]; pOrg = ctriangles[pTri * 9 + 3 + (pOri + 1) % 3]; pDest = ctriangles[pTri * 9 + 3 + (pOri + 2) % 3]; if (pNextTri >= 0) { // one triangle INSERT_TRIANGLE(pOrg, pDest, x, t1); INSERT_TRIANGLE(pDest, pApex, x, t2); INSERT_TRIANGLE(pApex, pOrg, x, t3); } else { int nDest = ctriangles[pTri * 9 + (pOri + 1) % 3]; int pOppTri = decode_tri(nDest); int pOppOri = decode_ori(nDest); pOpposite = ctriangles[pOppTri * 9 + 3 + pOppOri]; t4 = pOppTri; INSERT_TRIANGLE(pOrg, pDest, x, t1); INSERT_TRIANGLE(pDest, pOpposite, x, t2); INSERT_TRIANGLE(pOpposite, pApex, x, t3); INSERT_TRIANGLE(pApex, pOrg, x, t4); } } /****************************************************************** * Update the links between triangles after adding new triangles ******************************************************************/ __global__ void kernelUpdateMissingTriangleLinks(int *ctriangles, int *cvertarr, short *cnewtri, int nTris, int step) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] != step) return ; int p0, p1, p2, n0 = -1, n1 = -1, n2 = -1; int nCounter, pNextTri, pTri, pOri, pTri9; int x9 = x * 9; p2 = ctriangles[x9 + 3]; p1 = ctriangles[x9 + 5]; p0 = ctriangles[x9 + 4]; nCounter = 0; // orientation 0 // Travel through the list of triangles sharing vertex 0 with this triangle. // In this list we can find at most two triangles sharing edge (p0, p1) and // (p2, p0) with our triangle. 
pNextTri = cvertarr[p0]; while (pNextTri >= 0 && nCounter < 2) { pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); pTri9 = pTri * 9; if (p2 == ctriangles[pTri9 + 3 + (pOri + 2) % 3]) { // NextDest n2 = pNextTri; ctriangles[pTri9 + pOri] = (x << 2) | 2; nCounter++; } if (p1 == ctriangles[pTri9 + 3 + pOri]) { // NextApex n0 = (pTri << 2) | ((pOri + 2) % 3); ctriangles[pTri9 + (pOri + 2) % 3] = (x << 2); nCounter++; } pNextTri = ctriangles[pTri9 + 6 + pOri]; } // orientation 1 // Find the triangle with edge (p1, p2) pNextTri = cvertarr[p1]; while (pNextTri >= 0) { pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); pTri9 = pTri * 9; if (p2 == ctriangles[pTri9 + 3 + pOri]) { // NextApex n1 = (pTri << 2) | ((pOri + 2) % 3); ctriangles[pTri9 + (pOri + 2) % 3] = (x << 2) | 1; break ; } pNextTri = ctriangles[pTri9 + 6 + pOri]; } ctriangles[x9 + 0] = n0; ctriangles[x9 + 1] = n1; ctriangles[x9 + 2] = n2; } /******************************************************************** * Fix vertex array ********************************************************************/ __global__ void kernelMarkValidTriangles1(short *cnewtri, int *cvalid, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris) return ; cvalid[x] = (cnewtri[x] >= 0) ? 1 : 0; } __global__ void kernelCollectEmptySlots1(short *cnewtri, int *cprefix, int *cempty, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] >= 0) return ; int id = x - cprefix[x]; cempty[id] = x; } __global__ void kernelFillEmptySlots1(short *cnewtri, int *cprefix, int *cempty, int *ctriangles, int nTris, int newnTris, int offset) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] < 0) return ; int value; if (x < newnTris) value = x; else { value = cempty[cprefix[x] - offset]; for (int i = 0; i < 9; i++) ctriangles[value * 9 + i] = ctriangles[x * 9 + i]; } cprefix[x] = value; } __global__ void kernelFixIndices1(int *ctriangles, int *newindex, int nTris) { __shared__ int ct[WBLOCK * 9]; int tId = threadIdx.x; int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x, x9 = x * 9; int i, id; if (x >= nTris) return ; // Cooperatively read all triangles processed by one block for (i = 0, id = tId; i < 9; i++, id += WBLOCK) ct[id] = ctriangles[x9 + id]; __syncthreads(); if (x + tId < nTris) { i = tId * 9; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; i++; i++; i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); } __syncthreads(); for (i = 0, id = tId; i < 9; i++, id += WBLOCK) ctriangles[x9 + id] = ct[id]; } /******************************************************************** * Insert missing sites caused by overlapping or bad case shifting ********************************************************************/ void cudaMissing() { hipFuncSetCacheConfig( kernelLocateTriangleContainer, hipFuncCachePreferL1 ); hipFuncSetCacheConfig( kernelUpdateMissingTriangleLinks, hipFuncCachePreferL1 ); cutilSafeCall( hipMemcpyToSymbol(constData, hostConst, 13 * sizeof(REAL)) ); // Collect 
dead triangles, insert new triangles to these slots first. int *cdeadtri, *cmarker, *cprefix; cutilSafeCall( hipMalloc((void **) &cprefix, 2 * nVerts * sizeof(int)) ); cutilSafeCall( hipMalloc((void **) &cmarker, 2 * nVerts * sizeof(int)) ); dim3 block = dim3(WBLOCK); dim3 grid = dim3(STRIPE, nTris / (STRIPE * block.x) + 1); int lastItem; // Mark all dead triangles as 1 in cmarker array, 0 if not. hipLaunchKernelGGL(( kernelMarkDeadTriangles), dim3(grid), dim3(block) , 0, 0, cmarker, cnewtri, nTris); cutilCheckError(); cutilSafeCall( hipMemcpy(&lastItem, cmarker + nTris - 1, sizeof(int), hipMemcpyDeviceToHost) ); // Use prefix sum to compute the offset thrust::exclusive_scan( IntDPtr(cmarker), IntDPtr(cmarker) + nTris, IntDPtr(cprefix) ); // Temporary release what we no longer need. cutilSafeCall( hipFree(cmarker)); // Compute the size needed for the list of dead triangles // We also store the unused slots (after nTris but less than 2 * nVerts) int deadCounter; cutilSafeCall( hipMemcpy(&deadCounter, cprefix + nTris - 1, sizeof(int), hipMemcpyDeviceToHost) ); deadCounter += lastItem; int tailTri = nVerts * 2 - nTris; int deadListSize = deadCounter + tailTri; cutilSafeCall( hipMalloc((void **) &cdeadtri, deadListSize * sizeof(int)) ); // Collect these dead triangles into the hipLaunchKernelGGL(( kernelCollectDeadTriangles), dim3(grid), dim3(block) , 0, 0, cdeadtri, cnewtri, cprefix, nTris); cutilCheckError(); //printf("Dead triangles: %i\n", deadCounter); grid = dim3(256); //tailTri / block.x + 1); hipLaunchKernelGGL(( kernelFillIncrement), dim3(grid), dim3(block) , 0, 0, cdeadtri + deadCounter, nTris, tailTri); cutilCheckError(); /******************************************************** * Process missing sites ********************************************************/ int *active; cutilSafeCall( hipMalloc( (void**)&active, nVerts * sizeof(int) )); IntDPtr activePtr( active ); IntDPtr cvertarrPtr( cvertarr ); thrust::sequence( activePtr, activePtr + nVerts ); IntDPtr lastActivePtr = thrust::remove_if( activePtr, activePtr + nVerts, cvertarrPtr, isNotMinusOne() ); int noActive = lastActivePtr - activePtr; int *clocation; BYTE *caffected; cutilSafeCall( hipMalloc((void **) &caffected, nVerts * sizeof(BYTE)) ); cutilSafeCall( hipMalloc((void **) &clocation, nVerts * sizeof(int)) ); cutilSafeCall( hipMalloc((void **) &cmarker, nVerts * sizeof(int)) ); block = dim3(128); dim3 gridFull = dim3(STRIPE, nVerts / (STRIPE * block.x) + 1); int triUsed = 0; do { // cprefix will be used as a marker for voting which insertion can be processed cutilSafeCall( hipMemset(cprefix, 127, nVerts * 2 * sizeof(int)) ); cutilSafeCall( hipMemset(cflag, 0, sizeof(int)) ); // Locate triangles containing the missing sites grid = dim3(STRIPE, noActive / (STRIPE * block.x) + 1); hipLaunchKernelGGL(( kernelLocateTriangleContainer), dim3(grid), dim3(block) , 0, 0, ctriangles, cvertarr, tvertices, clocation, cprefix, covertices, nVerts, active, noActive); cutilCheckError(); cutilSafeCall( hipMemset(cmarker, 0, nVerts * sizeof(int)) ); cutilSafeCall( hipMemset(caffected, 0, nVerts) ); // Determine which missing point insertion can be done in this pass hipLaunchKernelGGL(( kernelPreprocessTriangles), dim3(grid), dim3(block) , 0, 0, ctriangles, cvertarr, clocation, cprefix, tvertices, cnewtri, cmarker, caffected, nVerts, step, active, noActive); // In cmarker we have the number of new triangles // that will be generated by inserting each site (0 or 2). 
thrust::exclusive_scan( IntDPtr(cmarker), IntDPtr(cmarker) + nVerts, IntDPtr(cprefix) ); // We remove the container triangle and fix the vertex array. hipLaunchKernelGGL(( kernelFixVertexArrayMissing), dim3(gridFull), dim3(block) , 0, 0, ctriangles, cvertarr, caffected, cnewtri, nVerts); // We then insert three new triangles for each missing site inserted. hipLaunchKernelGGL(( kernelInsertMissingSites), dim3(grid), dim3(block) , 0, 0, ctriangles, cvertarr, clocation, cmarker, cdeadtri + triUsed, cprefix, cnewtri, nVerts, step, active, noActive); // Update the offset in the dead triangle list cutilSafeCall( hipMemcpy(&lastItem, cmarker + nVerts - 1, sizeof(int), hipMemcpyDeviceToHost) ); int used; cutilSafeCall( hipMemcpy(&used, cprefix + nVerts - 1, sizeof(int), hipMemcpyDeviceToHost) ); triUsed += used + lastItem; int newsize = MAX(nTris, nTris - deadCounter + triUsed); // Update links between the new triangles and the old one. grid = dim3(STRIPE, newsize / (STRIPE * block.x) + 1); hipLaunchKernelGGL(( kernelUpdateMissingTriangleLinks), dim3(grid), dim3(block) , 0, 0, ctriangles, cvertarr, cnewtri, newsize, step); //printf("--------Insert missing sites - step %i ; Inserted %i triangles\n", step, used + lastItem); IntDPtr lastActivePtr = thrust::remove_if( activePtr, activePtr + noActive, isNegative() ); noActive = lastActivePtr - activePtr; step++; } while (noActive > 0); cutilSafeCall( hipFree( active ) ); // We do not keep track of the dead triangles after this, // because after removing the fake boundary, there would be a lot more, and they // will be recompute and recompact by then. deadCounter -= triUsed; // If we have used up all the dead triangles and more, we update nTris if (deadCounter < 0) nTris -= deadCounter; /******* DONE *******/ /********************************************************* * Compact the triangle list *********************************************************/ cutilSafeCall( hipFree(cmarker)); cutilSafeCall( hipFree(clocation)); cutilSafeCall( hipFree(caffected)); cutilSafeCall( hipFree(cprefix)); cutilSafeCall( hipFree(tvertices)); cutilSafeCall( hipFree(cdeadtri) ); /********************************************************* * Compact the triangle list *********************************************************/ if(deadCounter>0) { int *cvalid, *cprefix1; cutilSafeCall( hipMalloc((void **) &cvalid, 2 * nVerts * sizeof(int)) ); cutilSafeCall( hipMalloc((void **) &cprefix1, 2 * nVerts * sizeof(int)) ); block = dim3(WBLOCK); grid = dim3(STRIPE, nTris / (STRIPE * block.x) + 1); // Mark the valid triangles in the list hipLaunchKernelGGL(( kernelMarkValidTriangles1), dim3(grid), dim3(block) , 0, 0, cnewtri, cvalid, nTris); cutilCheckError(); // Compute the offset of them in the new list thrust::exclusive_scan( IntDPtr(cvalid), IntDPtr(cvalid) + nTris, IntDPtr(cprefix1) ); int newnTris, lastitem, offset; cutilSafeCall( hipMemcpy(&newnTris, cprefix1 + nTris - 1, sizeof(int), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(&lastitem, cvalid + nTris - 1, sizeof(int), hipMemcpyDeviceToHost) ); newnTris += lastitem; cutilSafeCall( hipMemcpy(&offset, cprefix1 + newnTris, sizeof(int), hipMemcpyDeviceToHost) ); // printf("nTris = %i, new nTris = %i\n", nTris, newnTris); // Find all empty slots in the list hipLaunchKernelGGL(( kernelCollectEmptySlots1), dim3(grid), dim3(block) , 0, 0, cnewtri, cprefix1, cvalid, nTris); cutilCheckError(); // Move those valid triangles at the end of the list // to the holes in the list. 
grid = dim3(STRIPE, nTris / (STRIPE * block.x) + 1); hipLaunchKernelGGL(( kernelFillEmptySlots1), dim3(grid), dim3(block) , 0, 0, cnewtri, cprefix1, cvalid, ctriangles, nTris, newnTris, offset); cutilCheckError(); // Fix the links after the index of our triangles are mixed up grid = dim3(STRIPE, newnTris / (STRIPE * block.x) + 1); hipLaunchKernelGGL(( kernelFixIndices1), dim3(grid), dim3(block) , 0, 0, ctriangles, cprefix1, newnTris); cutilCheckError(); cutilSafeCall( hipFree(cprefix1)); cutilSafeCall( hipFree(cvalid)); nTris = newnTris; } }
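// NOTE: Throughout cudaMissing.cu an oriented triangle is a single int: encode_tri packs the
// triangle index into the upper 30 bits and the orientation (starting vertex 0, 1 or 2) into
// the lowest 2 bits, and decode_tri / decode_ori invert it. The standalone snippet below is
// only a worked illustration of that bit layout and of the (ori + 1) % 3 rotation the kernels
// apply; it is not part of GPU-DT and the numbers are made up.

#include <cstdio>

#define decode_tri(x) ((x) >> 2)
#define decode_ori(x) ((x) & 3)
#define encode_tri(tri, ori) (((tri) << 2) | (ori))

int main()
{
    int packed = encode_tri(1234, 2);          // 1234 * 4 + 2 == 4938
    std::printf("packed=%d tri=%d ori=%d\n",
                packed, decode_tri(packed), decode_ori(packed));

    // Rotating to the next vertex of the same triangle, as the kernels do with (ori + 1) % 3:
    int rotated = encode_tri(decode_tri(packed), (decode_ori(packed) + 1) % 3);
    std::printf("rotated: tri=%d ori=%d\n", decode_tri(rotated), decode_ori(rotated));
    return 0;
}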
fd32ed6c473e7b8c5ab763f36ddbc0ed760808d4.cu
/* Author: Cao Thanh Tung, Qi Meng Date: 15/03/2011 File Name: cudaMissing.cu This file include all CUDA code to perform the inserting missing sites step =============================================================================== Copyright (c) 2011, School of Computing, National University of Singapore. All rights reserved. Project homepage: http://www.comp.nus.edu.sg/~tants/cdt.html If you use GPU-DT and you like it or have comments on its usefulness etc., we would love to hear from you at <[email protected]>. You may share with us your experience and any possibilities that we may improve the work/code. =============================================================================== Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the National University of University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission from the National University of Singapore. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma warning(disable: 4311 4312) #include <device_functions.h> #include <stdio.h> #include <string.h> #include "../gpudt.h" #include "cudaDecl.h" #include "common.h" #include "cudaCCW.h" #define MAXINT 2147483647 /*********************************************************** * Declarations ***********************************************************/ #define WBLOCK 256 #define INSERT_TRIANGLE(v0, v1, v2, tri) \ tmp = tri; \ cnewtri[tmp] = step; \ ctriangles[tmp * 9 + 3] = v2; \ ctriangles[tmp * 9 + 4] = v0; \ ctriangles[tmp * 9 + 5] = v1; \ ctriangles[tmp * 9 + 6] = atomicExch(&cvertarr[v0], (tmp << 2)); \ ctriangles[tmp * 9 + 7] = atomicExch(&cvertarr[v1], (tmp << 2) | 1); \ ctriangles[tmp * 9 + 8] = atomicExch(&cvertarr[v2], (tmp << 2) | 2); \ #define SET_TRIANGLE(vOrg, vDest, vApex, nOrg, nDest, nApex, tri, ori) \ ctriangles[(tri) * 9 + 3 + ((ori) + 1) % 3] = (vOrg); \ ctriangles[(tri) * 9 + 3 + ((ori) + 2) % 3] = (vDest); \ ctriangles[(tri) * 9 + 3 + (ori)] = (vApex); \ ctriangles[(tri) * 9 + 6 + (ori)] = (nOrg); \ ctriangles[(tri) * 9 + 6 + ((ori) + 1) % 3] = (nDest); \ ctriangles[(tri) * 9 + 6 + ((ori) + 2) % 3] = (nApex) #define UPDATE_TEMP_LINK(pTriOri, pNext) \ if ((pTriOri) >= 0) \ ctriangles[decode_tri(pTriOri) * 9 + 6 + decode_ori(pTriOri)] = -(pNext) #define UPDATE_LINK(pTriOri, pNext) \ if ((pTriOri) >= 0) \ ctriangles[decode_tri(pTriOri) * 9 + decode_ori(pTriOri)] = (pNext) /************************************************************** * Exported methods **************************************************************/ extern "C" void cudaMissing(); /************************************************************** * Definitions **************************************************************/ // Decode an oriented triangle. // An oriented triangle consists of 32 bits. // - 30 highest bits represent the triangle index, // - 2 lowest bits represent the orientation (the starting vertex, 0, 1 or 2) #define decode_tri(x) ((x) >> 2) #define decode_ori(x) ((x) & 3) #define encode_tri(tri, ori) (((tri) << 2) | (ori)) #define MAX(x, y) ((x) < (y) ? (y) : (x)) /************************************************************ * Variables and functions shared with the main module ************************************************************/ extern int nTris, nVerts, nPoints,nConstraints; extern int *ctriangles; extern int *cvertarr; extern int *tvertices; extern REAL2 *covertices; extern short *cnewtri; extern int step; extern int *cflag; /******************************************************************* * Fill an array with increasing numbers *******************************************************************/ __global__ void kernelFillIncrement(int *list, int start, int length) { int x = blockIdx.x * blockDim.x + threadIdx.x; int noThreads = blockDim.x * gridDim.x; for (; x < length; x += noThreads) list[x] = start + x; } /******************************************************************** * Collect all dead triangles into a list. ********************************************************************/ __global__ void kernelMarkDeadTriangles(int *cmarker, short *cnewtri, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris) return ; cmarker[x] = (cnewtri[x] >= 0 ? 
0 : 1); } __global__ void kernelCollectDeadTriangles(int *cdeadTri, short *cnewtri, int *cmarker, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] >= 0) return ; int id = cmarker[x]; cdeadTri[id] = x; } /******************************************************************** * Locate the triangle which we are gonna insert a missing site to. * - If the anchor of the missing site is not yet inserted, skip * - Locate the triangle and mark it to avoid two insertions into * the same triangle. * - Guarantee that the missing site is not on the boundary due to * huge fake boundary added. ********************************************************************/ __global__ void kernelLocateTriangleContainer(int *ctriangles, int *cvertarr, int *tvertices, int *clocation, int *ctags, REAL2 *covertices, int nVerts, int *active, int noActive) { int t = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if ( t >= noActive ) return ; int x = active[t]; int anchor = tvertices[x]; if (cvertarr[anchor] < 0) { clocation[x] = MAXINT; return ; } REAL2 v = covertices[x]; REAL2 vanchor = covertices[anchor]; REAL ccDest, ccApex, ccOpposite; int pNextTri, pTri, pOri; pNextTri = cvertarr[anchor]; pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); int pDest, pApex, pOrg, pTmp; REAL2 vDest, vApex, vOrg, vTmp; pDest = ctriangles[pTri * 9 + 3 + (pOri + 2) % 3]; // Dest vDest = covertices[pDest]; ccDest = cuda_ccw(vanchor, vDest, v); do { pApex = ctriangles[pTri * 9 + 3 + pOri]; // apex vApex = covertices[pApex]; ccApex = cuda_ccw(vanchor, vApex, v); if (ccDest >= 0.0 && ccApex <= 0.0) // Inside the angle break; pDest = pApex; vDest = vApex; ccDest = ccApex; pNextTri = ctriangles[pTri * 9 + (pOri + 2) % 3]; pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); } while (true); // Found an angle, now look for the actual triangle // containing me. ccOpposite = cuda_ccw(vDest, vApex, v); if (ccOpposite < 0.0) { // It's not right here, need to walk a bit further while (true) { // Get the opposite triangle pNextTri = ctriangles[pTri * 9 + (pOri + 1) % 3]; //if (pNextTri < 0) { // cvertarr[x] = -100; // clocation[x] = encode_tri(pTri, (pOri + 1) % 3); // return ; //} pTri = decode_tri(pNextTri); // Rotate the triangle so that the org is opposite the previous org pOri = (decode_ori(pNextTri) + 2) % 3; pOrg = ctriangles[pTri * 9 + 3 + (pOri + 1) % 3]; vOrg = covertices[pOrg]; pTmp = pDest; pDest = pApex; pApex = pTmp; vTmp = vDest; vDest = vApex; vApex = vTmp; ccDest = cuda_ccw(vOrg, vDest, v); ccApex = cuda_ccw(vApex, vOrg, v); bool moveleft; if (ccDest >= 0.0) if (ccApex >= 0.0) // Found it! break; else moveleft = false; else if (ccApex >= 0.0) moveleft = true; else moveleft = (vOrg.x - v.x) * (vApex.x - vDest.x) + (vOrg.y - v.y) * (vApex.y - vDest.y) > 0.0; if (moveleft) { pOri = (pOri + 2) % 3; pApex = pDest; pDest = pOrg; vApex = vDest; vDest = vOrg; ccOpposite = ccDest; // Orientation is unimportant } else { pOri = (pOri + 1) % 3; pDest = pApex; pApex = pOrg; vDest = vApex; vApex = vOrg; ccOpposite = ccApex; } } } int c0 = 0; if (ccDest == 0.0) c0++; if (ccApex == 0.0) c0++; if (ccOpposite == 0.0) c0++; if (c0 == 0) { // Easiest case, it's right here! clocation[x] = pNextTri + 1; // Mark to indicate that it's a simple case. atomicMin(&ctags[pTri], x); } else if (c0 > 1) { // Duplicate point clocation[x] = pNextTri + 1; cvertarr[x] = -2; active[t] = -1; return ; } else { // On an edge. 
// Make sure our 'location' triangle always face toward that edge // (i.e. that edge will be opposite to the origin) if (ccDest == 0.0) pOri = (pOri + 2) % 3; else if (ccApex == 0.0) pOri = (pOri + 1) % 3; clocation[x] = -encode_tri(pTri, pOri) - 1; // To avoid deadlock when 3 sites want to insert on 3 edges, // and they try to mark 3 pairs triangles: (a, b), (b, c), (c, a) atomicMin(&ctags[pTri], x); atomicMin(&ctags[decode_tri(ctriangles[pTri * 9 + (pOri + 1) % 3])], x); } } /*************************************************************************** * Determine which missing point insertion can be take place, * mark those triangles that need to be deleted. ***************************************************************************/ __global__ void kernelPreprocessTriangles(int *ctriangles, int *cvertarr, int *clocation, int *ctags, int *tvertices, short *cnewtri, int *cmarker, BYTE *caffected, int nVerts, int step, int *active, int noActive) { int t = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if ( t >= noActive ) return ; int x = active[t]; int pNextTri = clocation[x]; if (pNextTri == MAXINT) return ; int pTri = decode_tri(abs(pNextTri) - 1); int pOri = decode_ori(abs(pNextTri) - 1); int popp, pOppTri, pOppOri; bool success; if (pNextTri >= 0) // one triangle success = (ctags[pTri] == x); else { popp = (ctriangles[pTri * 9 + (pOri + 1) % 3]); pOppTri = decode_tri(popp); pOppOri = decode_ori(popp); success = (ctags[pTri] == x && ctags[pOppTri] == x); } if (success) { cmarker[x] = 2; cnewtri[pTri] = -step; caffected[ctriangles[pTri * 9 + 3]] = 1; caffected[ctriangles[pTri * 9 + 4]] = 1; caffected[ctriangles[pTri * 9 + 5]] = 1; if (pNextTri < 0) { cnewtri[pOppTri] = -step; caffected[ctriangles[pOppTri * 9 + 3 + pOppOri]] = 1; } } } /************************************************************ * Fix the vertex array for those affected sites ************************************************************/ __global__ void kernelFixVertexArrayMissing(int *ctriangles, int *cvertarr, BYTE *caffected, short *cnewtri, int nVerts) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nVerts || caffected[x] != 1) return ; int p = cvertarr[x], pnext = p; // Find the first valid triangle while (pnext >= 0 && cnewtri[decode_tri(pnext)] < 0) pnext = ctriangles[decode_tri(pnext) * 9 + 6 + decode_ori(pnext)]; if (pnext != p) cvertarr[x] = pnext; while (pnext >= 0) { // Find an invalid triangle do { p = pnext; pnext = ctriangles[decode_tri(p) * 9 + 6 + decode_ori(p)]; } while (pnext >= 0 && cnewtri[decode_tri(pnext)] >= 0); if (pnext >= 0) { // Now pnext is deleted, so we fix the link for p. 
// Find the next valid triangle while (pnext >= 0 && cnewtri[decode_tri(pnext)] < 0) pnext = ctriangles[decode_tri(pnext) * 9 + 6 + decode_ori(pnext)]; ctriangles[decode_tri(p) * 9 + 6 + decode_ori(p)] = pnext; } } } __global__ void kernelInsertMissingSites(int *ctriangles, int *cvertarr, int *clocation, int *cmarker, int *cavailtri, int *cprefix, short *cnewtri, int nVerts, int step, int *active, int noActive) { int t = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if ( t >= noActive ) return; int x = active[t]; if (cmarker[x] != 2) return ; active[t] = -1; int pNextTri = clocation[x]; int pTri = decode_tri(abs(pNextTri) - 1); int pOri = decode_ori(abs(pNextTri) - 1); int pOrg, pDest, pApex, pOpposite; int t1, t2, t3, t4, tmp; int offset = cprefix[x]; t1 = pTri; t2 = cavailtri[offset]; t3 = cavailtri[offset + 1]; pApex = ctriangles[pTri * 9 + 3 + pOri]; pOrg = ctriangles[pTri * 9 + 3 + (pOri + 1) % 3]; pDest = ctriangles[pTri * 9 + 3 + (pOri + 2) % 3]; if (pNextTri >= 0) { // one triangle INSERT_TRIANGLE(pOrg, pDest, x, t1); INSERT_TRIANGLE(pDest, pApex, x, t2); INSERT_TRIANGLE(pApex, pOrg, x, t3); } else { int nDest = ctriangles[pTri * 9 + (pOri + 1) % 3]; int pOppTri = decode_tri(nDest); int pOppOri = decode_ori(nDest); pOpposite = ctriangles[pOppTri * 9 + 3 + pOppOri]; t4 = pOppTri; INSERT_TRIANGLE(pOrg, pDest, x, t1); INSERT_TRIANGLE(pDest, pOpposite, x, t2); INSERT_TRIANGLE(pOpposite, pApex, x, t3); INSERT_TRIANGLE(pApex, pOrg, x, t4); } } /****************************************************************** * Update the links between triangles after adding new triangles ******************************************************************/ __global__ void kernelUpdateMissingTriangleLinks(int *ctriangles, int *cvertarr, short *cnewtri, int nTris, int step) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] != step) return ; int p0, p1, p2, n0 = -1, n1 = -1, n2 = -1; int nCounter, pNextTri, pTri, pOri, pTri9; int x9 = x * 9; p2 = ctriangles[x9 + 3]; p1 = ctriangles[x9 + 5]; p0 = ctriangles[x9 + 4]; nCounter = 0; // orientation 0 // Travel through the list of triangles sharing vertex 0 with this triangle. // In this list we can find at most two triangles sharing edge (p0, p1) and // (p2, p0) with our triangle. 
pNextTri = cvertarr[p0]; while (pNextTri >= 0 && nCounter < 2) { pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); pTri9 = pTri * 9; if (p2 == ctriangles[pTri9 + 3 + (pOri + 2) % 3]) { // NextDest n2 = pNextTri; ctriangles[pTri9 + pOri] = (x << 2) | 2; nCounter++; } if (p1 == ctriangles[pTri9 + 3 + pOri]) { // NextApex n0 = (pTri << 2) | ((pOri + 2) % 3); ctriangles[pTri9 + (pOri + 2) % 3] = (x << 2); nCounter++; } pNextTri = ctriangles[pTri9 + 6 + pOri]; } // orientation 1 // Find the triangle with edge (p1, p2) pNextTri = cvertarr[p1]; while (pNextTri >= 0) { pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); pTri9 = pTri * 9; if (p2 == ctriangles[pTri9 + 3 + pOri]) { // NextApex n1 = (pTri << 2) | ((pOri + 2) % 3); ctriangles[pTri9 + (pOri + 2) % 3] = (x << 2) | 1; break ; } pNextTri = ctriangles[pTri9 + 6 + pOri]; } ctriangles[x9 + 0] = n0; ctriangles[x9 + 1] = n1; ctriangles[x9 + 2] = n2; } /******************************************************************** * Fix vertex array ********************************************************************/ __global__ void kernelMarkValidTriangles1(short *cnewtri, int *cvalid, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris) return ; cvalid[x] = (cnewtri[x] >= 0) ? 1 : 0; } __global__ void kernelCollectEmptySlots1(short *cnewtri, int *cprefix, int *cempty, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] >= 0) return ; int id = x - cprefix[x]; cempty[id] = x; } __global__ void kernelFillEmptySlots1(short *cnewtri, int *cprefix, int *cempty, int *ctriangles, int nTris, int newnTris, int offset) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] < 0) return ; int value; if (x < newnTris) value = x; else { value = cempty[cprefix[x] - offset]; for (int i = 0; i < 9; i++) ctriangles[value * 9 + i] = ctriangles[x * 9 + i]; } cprefix[x] = value; } __global__ void kernelFixIndices1(int *ctriangles, int *newindex, int nTris) { __shared__ int ct[WBLOCK * 9]; int tId = threadIdx.x; int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x, x9 = x * 9; int i, id; if (x >= nTris) return ; // Cooperatively read all triangles processed by one block for (i = 0, id = tId; i < 9; i++, id += WBLOCK) ct[id] = ctriangles[x9 + id]; __syncthreads(); if (x + tId < nTris) { i = tId * 9; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; i++; i++; i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); } __syncthreads(); for (i = 0, id = tId; i < 9; i++, id += WBLOCK) ctriangles[x9 + id] = ct[id]; } /******************************************************************** * Insert missing sites caused by overlapping or bad case shifting ********************************************************************/ void cudaMissing() { cudaFuncSetCacheConfig( kernelLocateTriangleContainer, cudaFuncCachePreferL1 ); cudaFuncSetCacheConfig( kernelUpdateMissingTriangleLinks, cudaFuncCachePreferL1 ); cutilSafeCall( cudaMemcpyToSymbol(constData, hostConst, 13 * sizeof(REAL)) ); // 
Collect dead triangles, insert new triangles to these slots first. int *cdeadtri, *cmarker, *cprefix; cutilSafeCall( cudaMalloc((void **) &cprefix, 2 * nVerts * sizeof(int)) ); cutilSafeCall( cudaMalloc((void **) &cmarker, 2 * nVerts * sizeof(int)) ); dim3 block = dim3(WBLOCK); dim3 grid = dim3(STRIPE, nTris / (STRIPE * block.x) + 1); int lastItem; // Mark all dead triangles as 1 in cmarker array, 0 if not. kernelMarkDeadTriangles<<< grid, block >>>(cmarker, cnewtri, nTris); cutilCheckError(); cutilSafeCall( cudaMemcpy(&lastItem, cmarker + nTris - 1, sizeof(int), cudaMemcpyDeviceToHost) ); // Use prefix sum to compute the offset thrust::exclusive_scan( IntDPtr(cmarker), IntDPtr(cmarker) + nTris, IntDPtr(cprefix) ); // Temporary release what we no longer need. cutilSafeCall( cudaFree(cmarker)); // Compute the size needed for the list of dead triangles // We also store the unused slots (after nTris but less than 2 * nVerts) int deadCounter; cutilSafeCall( cudaMemcpy(&deadCounter, cprefix + nTris - 1, sizeof(int), cudaMemcpyDeviceToHost) ); deadCounter += lastItem; int tailTri = nVerts * 2 - nTris; int deadListSize = deadCounter + tailTri; cutilSafeCall( cudaMalloc((void **) &cdeadtri, deadListSize * sizeof(int)) ); // Collect these dead triangles into the kernelCollectDeadTriangles<<< grid, block >>>(cdeadtri, cnewtri, cprefix, nTris); cutilCheckError(); //printf("Dead triangles: %i\n", deadCounter); grid = dim3(256); //tailTri / block.x + 1); kernelFillIncrement<<< grid, block >>>(cdeadtri + deadCounter, nTris, tailTri); cutilCheckError(); /******************************************************** * Process missing sites ********************************************************/ int *active; cutilSafeCall( cudaMalloc( (void**)&active, nVerts * sizeof(int) )); IntDPtr activePtr( active ); IntDPtr cvertarrPtr( cvertarr ); thrust::sequence( activePtr, activePtr + nVerts ); IntDPtr lastActivePtr = thrust::remove_if( activePtr, activePtr + nVerts, cvertarrPtr, isNotMinusOne() ); int noActive = lastActivePtr - activePtr; int *clocation; BYTE *caffected; cutilSafeCall( cudaMalloc((void **) &caffected, nVerts * sizeof(BYTE)) ); cutilSafeCall( cudaMalloc((void **) &clocation, nVerts * sizeof(int)) ); cutilSafeCall( cudaMalloc((void **) &cmarker, nVerts * sizeof(int)) ); block = dim3(128); dim3 gridFull = dim3(STRIPE, nVerts / (STRIPE * block.x) + 1); int triUsed = 0; do { // cprefix will be used as a marker for voting which insertion can be processed cutilSafeCall( cudaMemset(cprefix, 127, nVerts * 2 * sizeof(int)) ); cutilSafeCall( cudaMemset(cflag, 0, sizeof(int)) ); // Locate triangles containing the missing sites grid = dim3(STRIPE, noActive / (STRIPE * block.x) + 1); kernelLocateTriangleContainer<<< grid, block >>>(ctriangles, cvertarr, tvertices, clocation, cprefix, covertices, nVerts, active, noActive); cutilCheckError(); cutilSafeCall( cudaMemset(cmarker, 0, nVerts * sizeof(int)) ); cutilSafeCall( cudaMemset(caffected, 0, nVerts) ); // Determine which missing point insertion can be done in this pass kernelPreprocessTriangles<<< grid, block >>>(ctriangles, cvertarr, clocation, cprefix, tvertices, cnewtri, cmarker, caffected, nVerts, step, active, noActive); // In cmarker we have the number of new triangles // that will be generated by inserting each site (0 or 2). thrust::exclusive_scan( IntDPtr(cmarker), IntDPtr(cmarker) + nVerts, IntDPtr(cprefix) ); // We remove the container triangle and fix the vertex array. 
kernelFixVertexArrayMissing<<< gridFull, block >>>(ctriangles, cvertarr, caffected, cnewtri, nVerts); // We then insert three new triangles for each missing site inserted. kernelInsertMissingSites<<< grid, block >>>(ctriangles, cvertarr, clocation, cmarker, cdeadtri + triUsed, cprefix, cnewtri, nVerts, step, active, noActive); // Update the offset in the dead triangle list cutilSafeCall( cudaMemcpy(&lastItem, cmarker + nVerts - 1, sizeof(int), cudaMemcpyDeviceToHost) ); int used; cutilSafeCall( cudaMemcpy(&used, cprefix + nVerts - 1, sizeof(int), cudaMemcpyDeviceToHost) ); triUsed += used + lastItem; int newsize = MAX(nTris, nTris - deadCounter + triUsed); // Update links between the new triangles and the old one. grid = dim3(STRIPE, newsize / (STRIPE * block.x) + 1); kernelUpdateMissingTriangleLinks<<< grid, block >>>(ctriangles, cvertarr, cnewtri, newsize, step); //printf("--------Insert missing sites - step %i ; Inserted %i triangles\n", step, used + lastItem); IntDPtr lastActivePtr = thrust::remove_if( activePtr, activePtr + noActive, isNegative() ); noActive = lastActivePtr - activePtr; step++; } while (noActive > 0); cutilSafeCall( cudaFree( active ) ); // We do not keep track of the dead triangles after this, // because after removing the fake boundary, there would be a lot more, and they // will be recompute and recompact by then. deadCounter -= triUsed; // If we have used up all the dead triangles and more, we update nTris if (deadCounter < 0) nTris -= deadCounter; /******* DONE *******/ /********************************************************* * Compact the triangle list *********************************************************/ cutilSafeCall( cudaFree(cmarker)); cutilSafeCall( cudaFree(clocation)); cutilSafeCall( cudaFree(caffected)); cutilSafeCall( cudaFree(cprefix)); cutilSafeCall( cudaFree(tvertices)); cutilSafeCall( cudaFree(cdeadtri) ); /********************************************************* * Compact the triangle list *********************************************************/ if(deadCounter>0) { int *cvalid, *cprefix1; cutilSafeCall( cudaMalloc((void **) &cvalid, 2 * nVerts * sizeof(int)) ); cutilSafeCall( cudaMalloc((void **) &cprefix1, 2 * nVerts * sizeof(int)) ); block = dim3(WBLOCK); grid = dim3(STRIPE, nTris / (STRIPE * block.x) + 1); // Mark the valid triangles in the list kernelMarkValidTriangles1<<< grid, block >>>(cnewtri, cvalid, nTris); cutilCheckError(); // Compute the offset of them in the new list thrust::exclusive_scan( IntDPtr(cvalid), IntDPtr(cvalid) + nTris, IntDPtr(cprefix1) ); int newnTris, lastitem, offset; cutilSafeCall( cudaMemcpy(&newnTris, cprefix1 + nTris - 1, sizeof(int), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(&lastitem, cvalid + nTris - 1, sizeof(int), cudaMemcpyDeviceToHost) ); newnTris += lastitem; cutilSafeCall( cudaMemcpy(&offset, cprefix1 + newnTris, sizeof(int), cudaMemcpyDeviceToHost) ); // printf("nTris = %i, new nTris = %i\n", nTris, newnTris); // Find all empty slots in the list kernelCollectEmptySlots1<<< grid, block >>>(cnewtri, cprefix1, cvalid, nTris); cutilCheckError(); // Move those valid triangles at the end of the list // to the holes in the list. 
grid = dim3(STRIPE, nTris / (STRIPE * block.x) + 1); kernelFillEmptySlots1<<< grid, block >>>(cnewtri, cprefix1, cvalid, ctriangles, nTris, newnTris, offset); cutilCheckError(); // Fix the links after the index of our triangles are mixed up grid = dim3(STRIPE, newnTris / (STRIPE * block.x) + 1); kernelFixIndices1<<< grid, block >>>(ctriangles, cprefix1, newnTris); cutilCheckError(); cutilSafeCall( cudaFree(cprefix1)); cutilSafeCall( cudaFree(cvalid)); nTris = newnTris; } }
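// NOTE: cudaMissing() uses the same stream-compaction idiom several times: mark elements with
// 0/1, run thrust::exclusive_scan to turn the marks into output positions, then scatter
// (kernelMarkDeadTriangles / kernelCollectDeadTriangles, kernelMarkValidTriangles1 /
// kernelCollectEmptySlots1). The snippet below is a self-contained sketch of that idiom with
// made-up data; the real code does the scatter in a kernel, and the "last prefix element plus
// last mark" total mirrors the deadCounter += lastItem fix-up above.

#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <cstdio>

int main()
{
    const int n = 8;
    int h_flags[n] = { 0, 1, 0, 0, 1, 1, 0, 1 };        // 1 = "dead" slot
    thrust::device_vector<int> marker(h_flags, h_flags + n);
    thrust::device_vector<int> prefix(n);

    // Exclusive scan of the 0/1 marks gives each marked element its position in the output.
    thrust::exclusive_scan(marker.begin(), marker.end(), prefix.begin());

    thrust::host_vector<int> h_prefix = prefix;
    int total = h_prefix[n - 1] + h_flags[n - 1];       // same last-element fix-up as the real code
    thrust::host_vector<int> dead(total);
    for (int i = 0; i < n; i++)                         // the real code performs this scatter in a kernel
        if (h_flags[i]) dead[h_prefix[i]] = i;

    for (int i = 0; i < total; i++)
        std::printf("dead[%d] = %d\n", i, dead[i]);
    return 0;
}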
d2bdf26fbf5ab4fa27a6795a09a68f22712f2b14.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <type_traits> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/MemoryAccess.cuh> #include <ATen/native/hip/PersistentSoftmax.cuh> #include <ATen/native/hip/block_reduce.cuh> #include <c10/hip/HIPMathCompat.h> #include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h> #include <ATen/native/nested/NestedTensorTransformerFunctions.h> #include <ATen/native/nested/NestedTensorUtils.h> #ifndef USE_ROCM #ifndef _WIN32 #include <cutlass/gemm/device/default_gemm_configuration.h> #include <cutlass/gemm/device/gemm_grouped.h> #include <cutlass/gemm/kernel/default_gemm_grouped.h> #endif #endif #include <ATen/NestedTensorImpl.h> #define BLOCK_DIM 256 #define GRID_DIM_Y 16 namespace at { namespace native { template <typename T> __global__ void remove_padding_transform0213_2( const T* input, T* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size) { const int batch_id = blockIdx.x; const int grid_id = blockIdx.y; const int tid = threadIdx.x + grid_id * BLOCK_DIM; const int grainsize = GRID_DIM_Y * BLOCK_DIM; const int offset = offsets[batch_id]; const int* sizes_i = output_sizes + batch_id * output_dim; const int numel_i = sizes_i[0] * sizes_i[1]; int input_offset = batch_id * input_sizes[1] * input_sizes[2] * input_sizes[3]; for (int ii = 0; ii < (numel_i / grainsize); ii++) { const int i = ii * grainsize + tid; const int i2 = i / sizes_i[1]; const int i13 = i % sizes_i[1]; const int i1 = i13 / (sizes_i[1] / input_sizes[1]); const int i3 = i13 % (sizes_i[1] / input_sizes[1]); output[offset + i] = input [input_offset + i1 * input_sizes[2] * input_sizes[3] + i2 * input_sizes[3] + i3]; } const int i = (numel_i / grainsize) * grainsize + tid; if (i < numel_i) { const int i2 = i / sizes_i[1]; const int i13 = i % sizes_i[1]; const int i1 = i13 / (sizes_i[1] / input_sizes[1]); const int i3 = i13 % (sizes_i[1] / input_sizes[1]); output[offset + i] = input [input_offset + i1 * input_sizes[2] * input_sizes[3] + i2 * input_sizes[3] + i3]; } } template <typename T> __global__ void remove_padding_2( const T* input, T* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size) { const int batch_id = blockIdx.x; const int grid_id = blockIdx.y; const int tid = threadIdx.x + grid_id * BLOCK_DIM; const int grainsize = GRID_DIM_Y * BLOCK_DIM; const int offset = offsets[batch_id]; const int* sizes_i = output_sizes + batch_id * output_dim; const int numel_i = sizes_i[0] * sizes_i[1]; int input_offset = batch_id * input_sizes[1] * input_sizes[2]; for (int ii = 0; ii < (numel_i / grainsize); ii++) { const int i = ii * grainsize + tid; const int i0 = i / sizes_i[1]; const int i1 = i % sizes_i[1]; const int i0_offset = i0 * input_sizes[2]; output[offset + i] = input[input_offset + i0_offset + i1]; } const int i = (numel_i / grainsize) * grainsize + tid; if (i < numel_i) { const int i0 = i / sizes_i[1]; const int i1 = i % sizes_i[1]; const int i0_offset = i0 * input_sizes[2]; output[offset + i] = input[input_offset + i0_offset + i1]; } } template <typename T> __global__ void remove_padding( const T* input, T* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size) { const int batch_id = 
blockIdx.x; const int grid_id = blockIdx.y; const int tid = threadIdx.x + grid_id * BLOCK_DIM; const int grainsize = GRID_DIM_Y * BLOCK_DIM; const int offset = offsets[batch_id]; const int* sizes_i = output_sizes + batch_id * output_dim; const int numel_i = sizes_i[0] * sizes_i[1] * sizes_i[2]; int input_offset = batch_id * input_sizes[1] * input_sizes[2] * input_sizes[3]; for (int ii = 0; ii < (numel_i / grainsize); ii++) { const int i = ii * grainsize + tid; const int i0 = i / (sizes_i[1] * sizes_i[2]); const int i1 = (i % (sizes_i[1] * sizes_i[2])) / sizes_i[2]; const int i2 = i % sizes_i[2]; const int i0_offset = i0 * input_sizes[2] * input_sizes[3]; const int i1_offset = i1 * input_sizes[3]; output[offset + i] = input[input_offset + i0_offset + i1_offset + i2]; } const int i = (numel_i / grainsize) * grainsize + tid; if (i < numel_i) { const int i0 = i / (sizes_i[1] * sizes_i[2]); const int i1 = (i % (sizes_i[1] * sizes_i[2])) / sizes_i[2]; const int i2 = i % sizes_i[2]; const int i0_offset = i0 * input_sizes[2] * input_sizes[3]; const int i1_offset = i1 * input_sizes[3]; output[offset + i] = input[input_offset + i0_offset + i1_offset + i2]; } } template <typename T> void remove_padding_kernelLauncher( const T* input, T* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size) { dim3 grid; grid.x = batch_size; grid.y = GRID_DIM_Y; at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (output_dim == 2) { hipLaunchKernelGGL(( remove_padding_2<T>), dim3(grid), dim3(BLOCK_DIM), 0, stream, input, output, offsets, input_sizes, output_sizes, output_dim, batch_size); } else { hipLaunchKernelGGL(( remove_padding<T>), dim3(grid), dim3(BLOCK_DIM), 0, stream, input, output, offsets, input_sizes, output_sizes, output_dim, batch_size); } } template <typename T> void remove_padding_transform0213_kernelLauncher( const T* input, T* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size) { dim3 grid; grid.x = batch_size; grid.y = GRID_DIM_Y; at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); TORCH_CHECK( output_dim == 2, "remove padding transform0213 only support output dim == 2"); hipLaunchKernelGGL(( remove_padding_transform0213_2<T>), dim3(grid), dim3(BLOCK_DIM), 0, stream, input, output, offsets, input_sizes, output_sizes, output_dim, batch_size); } template void remove_padding_kernelLauncher<float>( const float* input, float* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size); template void remove_padding_kernelLauncher<c10::Half>( const c10::Half* input, c10::Half* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size); template void remove_padding_transform0213_kernelLauncher<float>( const float* input, float* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size); template void remove_padding_transform0213_kernelLauncher<c10::Half>( const c10::Half* input, c10::Half* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size); template <typename T> __global__ void add_padding_1( const T* input, T* output, T padding_value, const int* offsets, const int* input_sizes, int input_dim, int output_sizes_1, const int batch_size) { const int batch_id = blockIdx.x; 
const int grid_id = blockIdx.y; const int tid = threadIdx.x + grid_id * BLOCK_DIM; const int grainsize = GRID_DIM_Y * BLOCK_DIM; const int* sizes_i = input_sizes + batch_id * input_dim; const int batch_output_offset = batch_id * output_sizes_1; for (int ii = 0; ii < (output_sizes_1 / grainsize); ii++) { const int i = ii * grainsize + tid; const int output_offset = batch_output_offset + i; if (batch_id < batch_size && i < sizes_i[0]) { const int batch_input_offset = offsets[batch_id]; output[output_offset] = input[batch_input_offset + i]; } else { output[output_offset] = padding_value; } } const int i = (output_sizes_1 / grainsize) * grainsize + tid; if (i < output_sizes_1) { const int output_offset = batch_output_offset + i; if (batch_id < batch_size && (i < sizes_i[0])) { const int batch_input_offset = offsets[batch_id]; output[output_offset] = input[batch_input_offset + i]; } else { output[output_offset] = padding_value; } } } template <typename T> __global__ void add_padding_2( const T* input, T* output, T padding_value, const int* offsets, const int* input_sizes, int input_dim, int output_sizes_1, int output_sizes_2, const int batch_size) { const int batch_id = blockIdx.x; const int grid_id = blockIdx.y; const int tid = threadIdx.x + grid_id * BLOCK_DIM; const int grainsize = GRID_DIM_Y * BLOCK_DIM; const int* sizes_i = input_sizes + batch_id * input_dim; const int output_offset = batch_id * output_sizes_1 * output_sizes_2; const int output_numel = output_sizes_1 * output_sizes_2; for (int ii = 0; ii < (output_numel / grainsize); ii++) { const int i = ii * grainsize + tid; const int i0 = i / (output_sizes_2); const int i1 = i - i0 * output_sizes_2; if (batch_id < batch_size && i0 < sizes_i[0] && i1 < sizes_i[1]) { const int offset = offsets[batch_id]; const int input_offset = offset + i0 * sizes_i[1] + i1; output[output_offset + i] = input[input_offset]; } else { output[output_offset + i] = padding_value; } } const int i = (output_numel / grainsize) * grainsize + tid; if (i < output_numel) { const int i0 = i / (output_sizes_2); const int i1 = i - i0 * output_sizes_2; if (batch_id < batch_size && i0 < sizes_i[0] && i1 < sizes_i[1]) { const int offset = offsets[batch_id]; const int input_offset = offset + i0 * sizes_i[1] + i1; output[output_offset + i] = input[input_offset]; } else { output[output_offset + i] = padding_value; } } } template <typename T> __global__ void add_padding_3( const T* input, T* output, T padding_value, const int* offsets, const int* input_sizes, int input_dim, int output_sizes_1, int output_sizes_2, int output_sizes_3, const int batch_size) { const int batch_id = blockIdx.x; const int grid_id = blockIdx.y; const int tid = threadIdx.x + grid_id * BLOCK_DIM; const int grainsize = GRID_DIM_Y * BLOCK_DIM; const int* sizes_i = input_sizes + batch_id * input_dim; const int output_offset = batch_id * output_sizes_1 * output_sizes_2 * output_sizes_3; const int output_numel = output_sizes_1 * output_sizes_2 * output_sizes_3; for (int ii = 0; ii < (output_numel / grainsize); ii++) { const int i = ii * grainsize + tid; const int i0 = i / (output_sizes_2 * output_sizes_3); const int i1 = (i % (output_sizes_2 * output_sizes_3)) / output_sizes_3; const int i2 = i % output_sizes_3; if (batch_id < batch_size && i0 < sizes_i[0] && i1 < sizes_i[1] && i2 < sizes_i[2]) { const int offset = offsets[batch_id]; const int input_offset = offset + i0 * (sizes_i[1] * sizes_i[2]) + i1 * sizes_i[2] + i2; output[output_offset + i] = input[input_offset]; } else { output[output_offset + i] = 
padding_value; } } const int i = (output_numel / grainsize) * grainsize + tid; if (i < output_numel) { const int i0 = i / (output_sizes_2 * output_sizes_3); const int i1 = (i % (output_sizes_2 * output_sizes_3)) / output_sizes_3; const int i2 = i % output_sizes_3; if (batch_id < batch_size && i0 < sizes_i[0] && i1 < sizes_i[1] && i2 < sizes_i[2]) { const int offset = offsets[batch_id]; const int input_offset = offset + i0 * (sizes_i[1] * sizes_i[2]) + i1 * sizes_i[2] + i2; output[output_offset + i] = input[input_offset]; } else { output[output_offset + i] = padding_value; } } } template <typename T> void add_padding_kernelLauncher( T* input, // [batch_size x None] T* output, // [batch_size x max(input.nested_size(1)) x inner_size] T padding_value, const int* offsets, const int* input_sizes, int input_dim, const std::vector<int64_t>& output_sizes, const int batch_size, const int output_batch_size) { at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid; grid.x = output_batch_size; grid.y = GRID_DIM_Y; if (input_dim == 1) { hipLaunchKernelGGL(( add_padding_1<T>), dim3(grid), dim3(BLOCK_DIM), 0, stream, input, output, padding_value, offsets, input_sizes, input_dim, output_sizes[1], batch_size); } if (input_dim == 2) { hipLaunchKernelGGL(( add_padding_2<T>), dim3(grid), dim3(BLOCK_DIM), 0, stream, input, output, padding_value, offsets, input_sizes, input_dim, output_sizes[1], output_sizes[2], batch_size); } if (input_dim == 3) { hipLaunchKernelGGL(( add_padding_3<T>), dim3(grid), dim3(BLOCK_DIM), 0, stream, input, output, padding_value, offsets, input_sizes, input_dim, output_sizes[1], output_sizes[2], output_sizes[3], batch_size); } } template void add_padding_kernelLauncher<double>( double* input, double* output, double padding_value, const int* offsets, const int* input_sizes, int input_dim, const std::vector<int64_t>& output_sizes, const int batch_size, const int output_batch_size); template void add_padding_kernelLauncher<float>( float* input, float* output, float padding_value, const int* offsets, const int* input_sizes, int input_dim, const std::vector<int64_t>& output_sizes, const int batch_size, const int output_batch_size); template void add_padding_kernelLauncher<c10::Half>( c10::Half* input, c10::Half* output, c10::Half padding_value, const int* offsets, const int* input_sizes, int input_dim, const std::vector<int64_t>& output_sizes, const int batch_size, const int output_batch_size); } // namespace native } // namespace at
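The kernels above index two small per-batch metadata arrays: offsets[batch_id] gives where sample batch_id starts in the packed (un-padded) buffer, and the per-sample size table is read as sizes + batch_id * dim. A minimal host-side sketch of how these arrays can be built (plain C++, no GPU runtime calls; the struct and function names are illustrative, not part of ATen, which derives this metadata from the nested tensor's size information):

#include <vector>

struct NestedMeta {
  std::vector<int> sizes;    // flattened [batch_size x dim] table of per-sample sizes
  std::vector<int> offsets;  // [batch_size], exclusive prefix sum of per-sample numels
};

// per_sample_sizes[b] holds the dim sizes of sample b, e.g. {seq_len_b, hidden}.
static NestedMeta build_meta(const std::vector<std::vector<int>>& per_sample_sizes) {
  NestedMeta m;
  int running = 0;
  for (const auto& s : per_sample_sizes) {
    m.offsets.push_back(running);                       // start of this sample in the packed buffer
    m.sizes.insert(m.sizes.end(), s.begin(), s.end());  // one row of the size table
    int numel = 1;
    for (int d : s) numel *= d;
    running += numel;                                   // advance by this sample's numel
  }
  return m;
}

// Example: two sequences of lengths 3 and 5 with hidden size 4 gives
//   build_meta({{3, 4}, {5, 4}})  ->  sizes = {3,4, 5,4}, offsets = {0, 12}
// Both arrays are copied to the device before the launchers above are called.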
d2bdf26fbf5ab4fa27a6795a09a68f22712f2b14.cu
#include <type_traits> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/MemoryAccess.cuh> #include <ATen/native/cuda/PersistentSoftmax.cuh> #include <ATen/native/cuda/block_reduce.cuh> #include <c10/cuda/CUDAMathCompat.h> #include <c10/cuda/CUDAStream.h> #include <ATen/native/nested/NestedTensorTransformerFunctions.h> #include <ATen/native/nested/NestedTensorUtils.h> #ifndef USE_ROCM #ifndef _WIN32 #include <cutlass/gemm/device/default_gemm_configuration.h> #include <cutlass/gemm/device/gemm_grouped.h> #include <cutlass/gemm/kernel/default_gemm_grouped.h> #endif #endif #include <ATen/NestedTensorImpl.h> #define BLOCK_DIM 256 #define GRID_DIM_Y 16 namespace at { namespace native { template <typename T> __global__ void remove_padding_transform0213_2( const T* input, T* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size) { const int batch_id = blockIdx.x; const int grid_id = blockIdx.y; const int tid = threadIdx.x + grid_id * BLOCK_DIM; const int grainsize = GRID_DIM_Y * BLOCK_DIM; const int offset = offsets[batch_id]; const int* sizes_i = output_sizes + batch_id * output_dim; const int numel_i = sizes_i[0] * sizes_i[1]; int input_offset = batch_id * input_sizes[1] * input_sizes[2] * input_sizes[3]; for (int ii = 0; ii < (numel_i / grainsize); ii++) { const int i = ii * grainsize + tid; const int i2 = i / sizes_i[1]; const int i13 = i % sizes_i[1]; const int i1 = i13 / (sizes_i[1] / input_sizes[1]); const int i3 = i13 % (sizes_i[1] / input_sizes[1]); output[offset + i] = input [input_offset + i1 * input_sizes[2] * input_sizes[3] + i2 * input_sizes[3] + i3]; } const int i = (numel_i / grainsize) * grainsize + tid; if (i < numel_i) { const int i2 = i / sizes_i[1]; const int i13 = i % sizes_i[1]; const int i1 = i13 / (sizes_i[1] / input_sizes[1]); const int i3 = i13 % (sizes_i[1] / input_sizes[1]); output[offset + i] = input [input_offset + i1 * input_sizes[2] * input_sizes[3] + i2 * input_sizes[3] + i3]; } } template <typename T> __global__ void remove_padding_2( const T* input, T* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size) { const int batch_id = blockIdx.x; const int grid_id = blockIdx.y; const int tid = threadIdx.x + grid_id * BLOCK_DIM; const int grainsize = GRID_DIM_Y * BLOCK_DIM; const int offset = offsets[batch_id]; const int* sizes_i = output_sizes + batch_id * output_dim; const int numel_i = sizes_i[0] * sizes_i[1]; int input_offset = batch_id * input_sizes[1] * input_sizes[2]; for (int ii = 0; ii < (numel_i / grainsize); ii++) { const int i = ii * grainsize + tid; const int i0 = i / sizes_i[1]; const int i1 = i % sizes_i[1]; const int i0_offset = i0 * input_sizes[2]; output[offset + i] = input[input_offset + i0_offset + i1]; } const int i = (numel_i / grainsize) * grainsize + tid; if (i < numel_i) { const int i0 = i / sizes_i[1]; const int i1 = i % sizes_i[1]; const int i0_offset = i0 * input_sizes[2]; output[offset + i] = input[input_offset + i0_offset + i1]; } } template <typename T> __global__ void remove_padding( const T* input, T* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size) { const int batch_id = blockIdx.x; const int grid_id = blockIdx.y; const int tid = threadIdx.x + grid_id * BLOCK_DIM; const 
int grainsize = GRID_DIM_Y * BLOCK_DIM; const int offset = offsets[batch_id]; const int* sizes_i = output_sizes + batch_id * output_dim; const int numel_i = sizes_i[0] * sizes_i[1] * sizes_i[2]; int input_offset = batch_id * input_sizes[1] * input_sizes[2] * input_sizes[3]; for (int ii = 0; ii < (numel_i / grainsize); ii++) { const int i = ii * grainsize + tid; const int i0 = i / (sizes_i[1] * sizes_i[2]); const int i1 = (i % (sizes_i[1] * sizes_i[2])) / sizes_i[2]; const int i2 = i % sizes_i[2]; const int i0_offset = i0 * input_sizes[2] * input_sizes[3]; const int i1_offset = i1 * input_sizes[3]; output[offset + i] = input[input_offset + i0_offset + i1_offset + i2]; } const int i = (numel_i / grainsize) * grainsize + tid; if (i < numel_i) { const int i0 = i / (sizes_i[1] * sizes_i[2]); const int i1 = (i % (sizes_i[1] * sizes_i[2])) / sizes_i[2]; const int i2 = i % sizes_i[2]; const int i0_offset = i0 * input_sizes[2] * input_sizes[3]; const int i1_offset = i1 * input_sizes[3]; output[offset + i] = input[input_offset + i0_offset + i1_offset + i2]; } } template <typename T> void remove_padding_kernelLauncher( const T* input, T* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size) { dim3 grid; grid.x = batch_size; grid.y = GRID_DIM_Y; at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); if (output_dim == 2) { remove_padding_2<T><<<grid, BLOCK_DIM, 0, stream>>>( input, output, offsets, input_sizes, output_sizes, output_dim, batch_size); } else { remove_padding<T><<<grid, BLOCK_DIM, 0, stream>>>( input, output, offsets, input_sizes, output_sizes, output_dim, batch_size); } } template <typename T> void remove_padding_transform0213_kernelLauncher( const T* input, T* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size) { dim3 grid; grid.x = batch_size; grid.y = GRID_DIM_Y; at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); TORCH_CHECK( output_dim == 2, "remove padding transform0213 only support output dim == 2"); remove_padding_transform0213_2<T><<<grid, BLOCK_DIM, 0, stream>>>( input, output, offsets, input_sizes, output_sizes, output_dim, batch_size); } template void remove_padding_kernelLauncher<float>( const float* input, float* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size); template void remove_padding_kernelLauncher<c10::Half>( const c10::Half* input, c10::Half* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size); template void remove_padding_transform0213_kernelLauncher<float>( const float* input, float* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size); template void remove_padding_transform0213_kernelLauncher<c10::Half>( const c10::Half* input, c10::Half* output, const int* offsets, const int* input_sizes, const int* output_sizes, int output_dim, const int batch_size); template <typename T> __global__ void add_padding_1( const T* input, T* output, T padding_value, const int* offsets, const int* input_sizes, int input_dim, int output_sizes_1, const int batch_size) { const int batch_id = blockIdx.x; const int grid_id = blockIdx.y; const int tid = threadIdx.x + grid_id * BLOCK_DIM; const int grainsize = GRID_DIM_Y * BLOCK_DIM; const int* sizes_i = input_sizes + batch_id * input_dim; const int batch_output_offset = batch_id * output_sizes_1; for (int ii 
= 0; ii < (output_sizes_1 / grainsize); ii++) { const int i = ii * grainsize + tid; const int output_offset = batch_output_offset + i; if (batch_id < batch_size && i < sizes_i[0]) { const int batch_input_offset = offsets[batch_id]; output[output_offset] = input[batch_input_offset + i]; } else { output[output_offset] = padding_value; } } const int i = (output_sizes_1 / grainsize) * grainsize + tid; if (i < output_sizes_1) { const int output_offset = batch_output_offset + i; if (batch_id < batch_size && (i < sizes_i[0])) { const int batch_input_offset = offsets[batch_id]; output[output_offset] = input[batch_input_offset + i]; } else { output[output_offset] = padding_value; } } } template <typename T> __global__ void add_padding_2( const T* input, T* output, T padding_value, const int* offsets, const int* input_sizes, int input_dim, int output_sizes_1, int output_sizes_2, const int batch_size) { const int batch_id = blockIdx.x; const int grid_id = blockIdx.y; const int tid = threadIdx.x + grid_id * BLOCK_DIM; const int grainsize = GRID_DIM_Y * BLOCK_DIM; const int* sizes_i = input_sizes + batch_id * input_dim; const int output_offset = batch_id * output_sizes_1 * output_sizes_2; const int output_numel = output_sizes_1 * output_sizes_2; for (int ii = 0; ii < (output_numel / grainsize); ii++) { const int i = ii * grainsize + tid; const int i0 = i / (output_sizes_2); const int i1 = i - i0 * output_sizes_2; if (batch_id < batch_size && i0 < sizes_i[0] && i1 < sizes_i[1]) { const int offset = offsets[batch_id]; const int input_offset = offset + i0 * sizes_i[1] + i1; output[output_offset + i] = input[input_offset]; } else { output[output_offset + i] = padding_value; } } const int i = (output_numel / grainsize) * grainsize + tid; if (i < output_numel) { const int i0 = i / (output_sizes_2); const int i1 = i - i0 * output_sizes_2; if (batch_id < batch_size && i0 < sizes_i[0] && i1 < sizes_i[1]) { const int offset = offsets[batch_id]; const int input_offset = offset + i0 * sizes_i[1] + i1; output[output_offset + i] = input[input_offset]; } else { output[output_offset + i] = padding_value; } } } template <typename T> __global__ void add_padding_3( const T* input, T* output, T padding_value, const int* offsets, const int* input_sizes, int input_dim, int output_sizes_1, int output_sizes_2, int output_sizes_3, const int batch_size) { const int batch_id = blockIdx.x; const int grid_id = blockIdx.y; const int tid = threadIdx.x + grid_id * BLOCK_DIM; const int grainsize = GRID_DIM_Y * BLOCK_DIM; const int* sizes_i = input_sizes + batch_id * input_dim; const int output_offset = batch_id * output_sizes_1 * output_sizes_2 * output_sizes_3; const int output_numel = output_sizes_1 * output_sizes_2 * output_sizes_3; for (int ii = 0; ii < (output_numel / grainsize); ii++) { const int i = ii * grainsize + tid; const int i0 = i / (output_sizes_2 * output_sizes_3); const int i1 = (i % (output_sizes_2 * output_sizes_3)) / output_sizes_3; const int i2 = i % output_sizes_3; if (batch_id < batch_size && i0 < sizes_i[0] && i1 < sizes_i[1] && i2 < sizes_i[2]) { const int offset = offsets[batch_id]; const int input_offset = offset + i0 * (sizes_i[1] * sizes_i[2]) + i1 * sizes_i[2] + i2; output[output_offset + i] = input[input_offset]; } else { output[output_offset + i] = padding_value; } } const int i = (output_numel / grainsize) * grainsize + tid; if (i < output_numel) { const int i0 = i / (output_sizes_2 * output_sizes_3); const int i1 = (i % (output_sizes_2 * output_sizes_3)) / output_sizes_3; const int i2 = i % 
output_sizes_3; if (batch_id < batch_size && i0 < sizes_i[0] && i1 < sizes_i[1] && i2 < sizes_i[2]) { const int offset = offsets[batch_id]; const int input_offset = offset + i0 * (sizes_i[1] * sizes_i[2]) + i1 * sizes_i[2] + i2; output[output_offset + i] = input[input_offset]; } else { output[output_offset + i] = padding_value; } } } template <typename T> void add_padding_kernelLauncher( T* input, // [batch_size x None] T* output, // [batch_size x max(input.nested_size(1)) x inner_size] T padding_value, const int* offsets, const int* input_sizes, int input_dim, const std::vector<int64_t>& output_sizes, const int batch_size, const int output_batch_size) { at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); dim3 grid; grid.x = output_batch_size; grid.y = GRID_DIM_Y; if (input_dim == 1) { add_padding_1<T><<<grid, BLOCK_DIM, 0, stream>>>( input, output, padding_value, offsets, input_sizes, input_dim, output_sizes[1], batch_size); } if (input_dim == 2) { add_padding_2<T><<<grid, BLOCK_DIM, 0, stream>>>( input, output, padding_value, offsets, input_sizes, input_dim, output_sizes[1], output_sizes[2], batch_size); } if (input_dim == 3) { add_padding_3<T><<<grid, BLOCK_DIM, 0, stream>>>( input, output, padding_value, offsets, input_sizes, input_dim, output_sizes[1], output_sizes[2], output_sizes[3], batch_size); } } template void add_padding_kernelLauncher<double>( double* input, double* output, double padding_value, const int* offsets, const int* input_sizes, int input_dim, const std::vector<int64_t>& output_sizes, const int batch_size, const int output_batch_size); template void add_padding_kernelLauncher<float>( float* input, float* output, float padding_value, const int* offsets, const int* input_sizes, int input_dim, const std::vector<int64_t>& output_sizes, const int batch_size, const int output_batch_size); template void add_padding_kernelLauncher<c10::Half>( c10::Half* input, c10::Half* output, c10::Half padding_value, const int* offsets, const int* input_sizes, int input_dim, const std::vector<int64_t>& output_sizes, const int batch_size, const int output_batch_size); } // namespace native } // namespace at
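For completeness, a sketch of how the add-padding launcher above might be driven from host code. This is illustrative only, not ATen's real call site: buffer names are hypothetical, error checking is omitted, and it assumes this translation unit is linked and the launcher template is declared (as in ATen/native/nested/NestedTensorTransformerFunctions.h).

#include <cuda_runtime.h>
#include <cstdint>
#include <vector>

// Packs two sequences (lengths 3 and 5, hidden size 4, 32 floats total) into a
// padded [2 x 5 x 4] tensor, writing 0.0f wherever a sequence has no data.
void pad_two_sequences(float* d_packed /* 32 floats, device */,
                       float* d_padded /* 2*5*4 floats, device */) {
  const int batch_size = 2, input_dim = 2;
  const std::vector<int> h_sizes = {3, 4, 5, 4};  // per-sample [length, hidden] rows
  const std::vector<int> h_offsets = {0, 12};     // exclusive prefix sum of per-sample numels
  int *d_sizes = nullptr, *d_offsets = nullptr;
  cudaMalloc((void**)&d_sizes, h_sizes.size() * sizeof(int));
  cudaMalloc((void**)&d_offsets, h_offsets.size() * sizeof(int));
  cudaMemcpy(d_sizes, h_sizes.data(), h_sizes.size() * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_offsets, h_offsets.data(), h_offsets.size() * sizeof(int), cudaMemcpyHostToDevice);

  const std::vector<int64_t> output_sizes = {2, 5, 4};  // [batch, max_len, hidden]
  at::native::add_padding_kernelLauncher<float>(
      d_packed, d_padded, /*padding_value=*/0.0f,
      d_offsets, d_sizes, input_dim, output_sizes,
      batch_size, /*output_batch_size=*/batch_size);

  cudaFree(d_sizes);
  cudaFree(d_offsets);
}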
62b64f91055335104f4041c3b210130de65acb36.hip
// !!! This is a file automatically generated by hipify!!! //general parts #include <stdio.h> #include <vector> #include <memory> #include <string.h> #include <chrono> #include <thread> #include <iostream> #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include <inttypes.h> //CUDA parts #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hipfft.h> #define GROUP 1 void sample_1001_benchmark_cuFFT_double_2_4096(bool file_output, FILE* output, int device_id) { const int num_runs = 3; if (file_output) fprintf(output, "1001 - cuFFT FFT + iFFT C2C benchmark 1D batched in double precision: all supported systems from 2 to 4096\n"); printf("1001 - cuFFT FFT + iFFT C2C benchmark 1D batched in double precision: all supported systems from 2 to 4096\n"); hipSetDevice(device_id); double benchmark_result[2] = { 0,0 };//averaged result = sum(system_size/iteration_time)/num_benchmark_samples hipfftDoubleComplex* inputC = (hipfftDoubleComplex*)malloc((uint64_t)sizeof(hipfftDoubleComplex) *pow(2, 27)); for (uint64_t i = 0; i <pow(2, 27); i++) { inputC[i].x = 2 * ((double)rand()) / RAND_MAX - 1.0; inputC[i].y = 2 * ((double)rand()) / RAND_MAX - 1.0; } int num_systems = 0; for (int n = 1; n < 4097; n++) { double run_time[num_runs][2]; for (int r = 0; r < num_runs; r++) { hipfftHandle planZ2Z; hipfftDoubleComplex* dataC; uint64_t dims[3]; dims[0] = n; if (n == 1) dims[0] = 4096; uint64_t temp = dims[0]; /*for (uint64_t j = 2; j < 14; j++) { if (temp % j == 0) { temp /= j; j = 1; } } if (temp != 1) break;*/ dims[1] = pow(2, (uint64_t)log2(64 * 32 * pow(2, 15) / dims[0])); if (dims[1] < 1) dims[1] = 1; dims[2] = 1; hipMalloc((void**)&dataC, sizeof(hipfftDoubleComplex) * dims[0] * dims[1] * dims[2]); hipMemcpy(dataC, inputC, sizeof(hipfftDoubleComplex) * dims[0] * dims[1] * dims[2], hipMemcpyHostToDevice); if (hipGetLastError() != hipSuccess) { fprintf(stderr, "Cuda error: Failed to allocate\n"); return; } uint64_t sizeCUDA; switch (1) { case 1: hipfftPlan1d(&planZ2Z, dims[0], HIPFFT_Z2Z, dims[1]); hipfftEstimate1d(dims[0], HIPFFT_Z2Z, 1, (size_t*)&sizeCUDA); break; case 2: hipfftPlan2d(&planZ2Z, dims[1], dims[0], HIPFFT_Z2Z); hipfftEstimate2d(dims[1], dims[0], HIPFFT_Z2Z, (size_t*)&sizeCUDA); break; case 3: hipfftPlan3d(&planZ2Z, dims[2], dims[1], dims[0], HIPFFT_Z2Z); hipfftEstimate3d(dims[2], dims[1], dims[0], HIPFFT_Z2Z, (size_t*)&sizeCUDA); break; } double totTime = 0; uint64_t cuBufferSize = sizeof(double) * 2 * dims[0] * dims[1] * dims[2]; uint64_t num_iter = ((4096 * 1024.0 * 1024.0) / cuBufferSize > 1000) ? 
1000 : (4096 * 1024.0 * 1024.0) / cuBufferSize ; if (num_iter == 0) num_iter = 1; auto timeSubmit = std::chrono::steady_clock::now(); for (int i = 0; i < num_iter; i++) { hipfftExecZ2Z(planZ2Z, dataC, dataC, -1); hipfftExecZ2Z(planZ2Z, dataC, dataC, 1); } hipDeviceSynchronize(); auto timeEnd = std::chrono::steady_clock::now(); totTime = (std::chrono::duration_cast<std::chrono::microseconds>(timeEnd - timeSubmit).count() * 0.001) / num_iter; run_time[r][0] = totTime; if (n > 1) { if (r == num_runs - 1) { num_systems++; double std_error = 0; double avg_time = 0; for (uint64_t t = 0; t < num_runs; t++) { avg_time += run_time[t][0]; } avg_time /= num_runs; for (uint64_t t = 0; t < num_runs; t++) { std_error += (run_time[t][0] - avg_time) * (run_time[t][0] - avg_time); } std_error = sqrt(std_error / num_runs); if (file_output) fprintf(output, "cuFFT System: %" PRIu64 " %" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 " bandwidth: %0.1f\n", dims[0], dims[1], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize * sizeof(float) / sizeof(double) / 1024) / avg_time), cuBufferSize / 1024.0 / 1024.0 / 1.024 * 4 / avg_time); printf("cuFFT System: %" PRIu64 " %" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 " bandwidth: %0.1f\n", dims[0], dims[1], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize * sizeof(float) / sizeof(double) / 1024) / avg_time), cuBufferSize / 1024.0 / 1024.0 / 1.024 * 4 / avg_time); benchmark_result[0] += ((double)cuBufferSize * sizeof(float)/sizeof(double)/ 1024) / avg_time; } } hipfftDestroy(planZ2Z); hipFree(dataC); hipDeviceSynchronize(); //hipfftDoubleComplex* output_cuFFT = (hipfftDoubleComplex*)(malloc(sizeof(hipfftDoubleComplex) * dims[0] * dims[1] * dims[2])); //hipMemcpy(output_cuFFT, dataC, sizeof(hipfftDoubleComplex) * dims[0] * dims[1] * dims[2], hipMemcpyDeviceToHost); //hipDeviceSynchronize(); } } free(inputC); benchmark_result[0] /= (num_systems); if (file_output) fprintf(output, "Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0])); printf("Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0])); }
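To make the sizing logic above concrete, the inner expressions work out as follows for the largest case, n = 4096 (a worked example, not extra benchmark output):

    dims[1]      = 2^floor(log2(64 * 32 * 2^15 / 4096)) = 2^14 = 16384 batched transforms
    cuBufferSize = 16 B * 4096 * 16384                  = 1 GiB (1024 MB)
    num_iter     = floor(4096 MB / 1024 MB)             = 4 forward+inverse pairs per timing sample

Because dims[1] scales inversely with dims[0], the batched buffer stays between roughly 512 MB and 1024 MB across the whole 2..4096 sweep, so num_iter only varies between about 4 and 7 and the 1000-iteration cap never binds for this benchmark.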
62b64f91055335104f4041c3b210130de65acb36.cu
//general parts #include <stdio.h> #include <vector> #include <memory> #include <string.h> #include <chrono> #include <thread> #include <iostream> #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include <inttypes.h> //CUDA parts #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cufft.h> #define GROUP 1 void sample_1001_benchmark_cuFFT_double_2_4096(bool file_output, FILE* output, int device_id) { const int num_runs = 3; if (file_output) fprintf(output, "1001 - cuFFT FFT + iFFT C2C benchmark 1D batched in double precision: all supported systems from 2 to 4096\n"); printf("1001 - cuFFT FFT + iFFT C2C benchmark 1D batched in double precision: all supported systems from 2 to 4096\n"); cudaSetDevice(device_id); double benchmark_result[2] = { 0,0 };//averaged result = sum(system_size/iteration_time)/num_benchmark_samples cufftDoubleComplex* inputC = (cufftDoubleComplex*)malloc((uint64_t)sizeof(cufftDoubleComplex) *pow(2, 27)); for (uint64_t i = 0; i <pow(2, 27); i++) { inputC[i].x = 2 * ((double)rand()) / RAND_MAX - 1.0; inputC[i].y = 2 * ((double)rand()) / RAND_MAX - 1.0; } int num_systems = 0; for (int n = 1; n < 4097; n++) { double run_time[num_runs][2]; for (int r = 0; r < num_runs; r++) { cufftHandle planZ2Z; cufftDoubleComplex* dataC; uint64_t dims[3]; dims[0] = n; if (n == 1) dims[0] = 4096; uint64_t temp = dims[0]; /*for (uint64_t j = 2; j < 14; j++) { if (temp % j == 0) { temp /= j; j = 1; } } if (temp != 1) break;*/ dims[1] = pow(2, (uint64_t)log2(64 * 32 * pow(2, 15) / dims[0])); if (dims[1] < 1) dims[1] = 1; dims[2] = 1; cudaMalloc((void**)&dataC, sizeof(cufftDoubleComplex) * dims[0] * dims[1] * dims[2]); cudaMemcpy(dataC, inputC, sizeof(cufftDoubleComplex) * dims[0] * dims[1] * dims[2], cudaMemcpyHostToDevice); if (cudaGetLastError() != cudaSuccess) { fprintf(stderr, "Cuda error: Failed to allocate\n"); return; } uint64_t sizeCUDA; switch (1) { case 1: cufftPlan1d(&planZ2Z, dims[0], CUFFT_Z2Z, dims[1]); cufftEstimate1d(dims[0], CUFFT_Z2Z, 1, (size_t*)&sizeCUDA); break; case 2: cufftPlan2d(&planZ2Z, dims[1], dims[0], CUFFT_Z2Z); cufftEstimate2d(dims[1], dims[0], CUFFT_Z2Z, (size_t*)&sizeCUDA); break; case 3: cufftPlan3d(&planZ2Z, dims[2], dims[1], dims[0], CUFFT_Z2Z); cufftEstimate3d(dims[2], dims[1], dims[0], CUFFT_Z2Z, (size_t*)&sizeCUDA); break; } double totTime = 0; uint64_t cuBufferSize = sizeof(double) * 2 * dims[0] * dims[1] * dims[2]; uint64_t num_iter = ((4096 * 1024.0 * 1024.0) / cuBufferSize > 1000) ? 
1000 : (4096 * 1024.0 * 1024.0) / cuBufferSize ; if (num_iter == 0) num_iter = 1; auto timeSubmit = std::chrono::steady_clock::now(); for (int i = 0; i < num_iter; i++) { cufftExecZ2Z(planZ2Z, dataC, dataC, -1); cufftExecZ2Z(planZ2Z, dataC, dataC, 1); } cudaDeviceSynchronize(); auto timeEnd = std::chrono::steady_clock::now(); totTime = (std::chrono::duration_cast<std::chrono::microseconds>(timeEnd - timeSubmit).count() * 0.001) / num_iter; run_time[r][0] = totTime; if (n > 1) { if (r == num_runs - 1) { num_systems++; double std_error = 0; double avg_time = 0; for (uint64_t t = 0; t < num_runs; t++) { avg_time += run_time[t][0]; } avg_time /= num_runs; for (uint64_t t = 0; t < num_runs; t++) { std_error += (run_time[t][0] - avg_time) * (run_time[t][0] - avg_time); } std_error = sqrt(std_error / num_runs); if (file_output) fprintf(output, "cuFFT System: %" PRIu64 " %" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 " bandwidth: %0.1f\n", dims[0], dims[1], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize * sizeof(float) / sizeof(double) / 1024) / avg_time), cuBufferSize / 1024.0 / 1024.0 / 1.024 * 4 / avg_time); printf("cuFFT System: %" PRIu64 " %" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 " bandwidth: %0.1f\n", dims[0], dims[1], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize * sizeof(float) / sizeof(double) / 1024) / avg_time), cuBufferSize / 1024.0 / 1024.0 / 1.024 * 4 / avg_time); benchmark_result[0] += ((double)cuBufferSize * sizeof(float)/sizeof(double)/ 1024) / avg_time; } } cufftDestroy(planZ2Z); cudaFree(dataC); cudaDeviceSynchronize(); //cufftDoubleComplex* output_cuFFT = (cufftDoubleComplex*)(malloc(sizeof(cufftDoubleComplex) * dims[0] * dims[1] * dims[2])); //cudaMemcpy(output_cuFFT, dataC, sizeof(cufftDoubleComplex) * dims[0] * dims[1] * dims[2], cudaMemcpyDeviceToHost); //cudaDeviceSynchronize(); } } free(inputC); benchmark_result[0] /= (num_systems); if (file_output) fprintf(output, "Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0])); printf("Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0])); }
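Distilled from the function above, the core measurement pattern is: build a batched Z2Z plan, run num_iter forward+inverse pairs on the device, synchronize, and divide the wall-clock time by num_iter. Below is a self-contained sketch with one fixed size (nx = 4096, batch = 1024) instead of the full sweep; it is illustrative and not part of the original benchmark.

#include <cufft.h>
#include <cuda_runtime.h>
#include <chrono>
#include <cstdio>

void time_z2z_roundtrip() {
  const int nx = 4096, batch = 1024, num_iter = 4;
  cufftDoubleComplex* data = nullptr;
  cudaMalloc((void**)&data, sizeof(cufftDoubleComplex) * nx * batch);
  cudaMemset(data, 0, sizeof(cufftDoubleComplex) * nx * batch);  // contents do not affect timing

  cufftHandle plan;
  cufftPlan1d(&plan, nx, CUFFT_Z2Z, batch);         // batched 1D double-precision C2C plan

  cudaDeviceSynchronize();                          // keep setup out of the timed region
  auto t0 = std::chrono::steady_clock::now();
  for (int i = 0; i < num_iter; i++) {
    cufftExecZ2Z(plan, data, data, CUFFT_FORWARD);  // in-place FFT
    cufftExecZ2Z(plan, data, data, CUFFT_INVERSE);  // in-place iFFT
  }
  cudaDeviceSynchronize();                          // wait for all transforms before stopping the clock
  auto t1 = std::chrono::steady_clock::now();

  double ms_per_pair =
      std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count() * 0.001 / num_iter;
  printf("FFT+iFFT: %0.3f ms per pair\n", ms_per_pair);

  cufftDestroy(plan);
  cudaFree(data);
}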
99d6df063ade9f6eff4021cb5f5b862f3f8a23b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // cudamatrix/cu-kernels.cu // Copyright 2009-2012 Karel Vesely // 2013 Ehsan Variani // 2013 Johns Hopkins University (author: Daniel Povey) // 2013 Hainan Xu // 2013 Xiaohui Zhang // 2013-2015 Guoguo Chen // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. // In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers #include <cfloat> #include "cudamatrix/cu-kernels-ansi.h" /*********************************************************************** * Generic __device__ functions */ template<typename Real> __device__ static Real _sum_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (sum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x >= halfPoint) { // was < // Get the shared value stored by another thread Real temp = 0.0; if(threadIdx.x < nTotalThreads) { // was +halfPoint temp = buffer[threadIdx.x]; // was +halfPoint } buffer[threadIdx.x - halfPoint] += temp; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } template<typename Real> __device__ static Real _min_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (min) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp < buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two } // the result return buffer[0]; } template<typename Real> __device__ static Real _max_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (max) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. 
} // the result return buffer[0]; } template<typename Real> __device__ static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (get index of maximum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread Real temp = -1e20; if(threadIdx.x+halfPoint < nTotalThreads) { temp = val[idx[threadIdx.x + halfPoint]]; } if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return idx[0]; } /*********************************************************************** * CUDA kernels * the functions are templated to have the float/double operations */ /* * CuMatrix */ template<typename Real> __global__ static void _copy_low_upp(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i <= j || i >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } template<typename Real> __global__ static void _copy_upp_low(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j <= i || j >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } // mat += diag(vec) * mat2. template<typename Real> __global__ static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim, const Real *vec, const Real *mat2, int mat2_row_stride, int mat2_col_stride, Real beta) { // Note from Dan: in this kernel, we make the x dimension correspond to the // row index and y to the column index. That was not always the case for // earlier kernels written by others. int i = blockIdx.y * blockDim.y + threadIdx.y; // row index int j = blockIdx.x * blockDim.x + threadIdx.x; // column index int index = i * mat_dim.stride + j, index2 = i * mat2_row_stride + j * mat2_col_stride; if (i < mat_dim.rows && j < mat_dim.cols) { mat[index] = alpha * vec[i] * mat2[index2] + beta * mat[index]; } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (i * (i+1) / 2) + j; int32_cuda index_A = i * dmat.stride + j; if (j <= i) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // transpose the indices used to index the source TpMatrix. if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (j * (j+1) / 2) + i; int32_cuda index_A = i * dmat.stride + j; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } // for this kernel, following the newer pattern, the x-dim is the row-index, the // y-dim is the col-index. 
template<typename Real, typename OtherReal> __global__ static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index. int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = j + i * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } // for this kernel, the x-dim is the row-index at the output, the y-dim is the // col-index at the output template<typename Real, typename OtherReal> __global__ static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index out int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index out int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = i + j * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat(Real* mat_out, const MatrixElement<OtherReal>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; int data_index = smat_in[smat_index].row * d_out.stride + smat_in[smat_index].column; mat_out[data_index] = smat_in[smat_index].weight; } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat_trans(Real* mat_out, const MatrixElement<OtherReal>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; int data_index = smat_in[smat_index].column * d_out.stride + smat_in[smat_index].row; mat_out[data_index] = smat_in[smat_index].weight; } template<typename Real> __global__ static void _trace_mat_smat_trans(const Real* mat_in, const MatrixElement<Real>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, Real* trace_vec_out) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= smat_d_in) return; int mat_index = smat_in[smat_index].row * mat_d_in.stride + smat_in[smat_index].column; trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight; } template<typename Real> __global__ static void _trace_mat_smat(const Real* mat_in, const MatrixElement<Real>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, Real* trace_vec_out) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= smat_d_in) return; int mat_index = smat_in[smat_index].column * mat_d_in.stride + smat_in[smat_index].row; trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight; } template<typename Real> __global__ static void _transpose_matrix(Real* mat, MatrixDim d) { // Transposes a square matrix in-place. int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j >= i || i >= d.rows) { return; } // Only half the threads act. 
int32_cuda index_a = j + i * d.stride, index_b = i + j * d.stride; Real a = mat[index_a], b = mat[index_b]; mat[index_a] = b; mat[index_b] = a; } template<typename Real> __global__ static void _apply_exp(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if ( i < d.cols && j < d.rows ) { mat[index] = exp(mat[index]); } } template<typename Real> __global__ static void _scale_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value * mat[index]; } } template<typename Real> __global__ static void _set_diag(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = i + i*d.stride; if ( i < d.rows && i < d.cols) { mat[index] = value; } } template<typename Real> __global__ static void _set_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value; } } template<typename Real> __global__ static void _add_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = mat[index] + value; } } template<typename Real> __global__ static void _set_const(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = value; } template<typename Real> __global__ static void _set_zero_above_diag(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < i) mat[index] = 0.0; } template<typename Real> __global__ static void _add(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] + value; } template<typename Real> __global__ static void _scale(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] * value; } template<typename Real> __global__ static void _apply_log(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = log(mat[index]); } template<typename Real> __global__ static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] * A[src_index]; } template<typename Real> __global__ static void _div_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + 
j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] / A[src_index]; } template<typename Real> __global__ static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if ( i < dst_d.cols && j < dst_d.rows ) { Real a = mat[dst_index], b = A[src_index]; mat[dst_index] = (a > b ? a : b); } } template<typename Real> __global__ static void _vec_mul_elements(Real* v, const Real* a, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) v[i] = v[i] * a[i]; } template<typename Real> __global__ static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[i]; } template<typename Real> __global__ static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[j]; } template<typename Real> __global__ static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride; int src_index = i / group_size + j * src_stride; y[dst_index] *= x[src_index]; } } /// y is the derivative we will output; vec is the input we're computing /// the group p-norm on, "norm" is the previously computed group p-norm. template<typename Real> __global__ static void _calc_pnorm_deriv(Real *deriv, const Real *vec, const Real *norm, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. norm_element = norm[src_index]; // this is the pnorm Real vec_element_sign = (vec_element > 0 ? 1 : -1); Real ans; if (norm_element <= 0.0) ans = 0.0; // The derivative is either zero or undefined at the origin. else ans = vec_element_sign * pow(std::abs(vec_element), power - 1) * pow(norm_element, 1 - power); deriv[dst_index] = ans; } } /// deriv is the derivative we will output; vec is the input we're computing /// the group max on, "maxv" is the previously computed group max. template<typename Real> __global__ static void _calc_group_max_deriv(Real *deriv, const Real *vec, const Real *maxv, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. max_element = maxv[src_index]; // this is the max value Real ans = (max_element == vec_element ? 1.0 : 0.0); deriv[dst_index] = ans; } } /// Set each element to y = (x == orig ? changed : x). 
template<typename Real> __global__ static void _replace_value(Real *vec, int dim, Real orig, Real changed) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) if (vec[i] == orig) vec[i] = changed; } template<typename Real> __global__ static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (j >= d.rows ) return; //invert divider in shared memory __shared__ Real inv[16]; if(threadIdx.x==0) { inv[threadIdx.y] = 1.0/vec_div[j]; } __syncthreads(); //multiply elements if (i < d.cols && j < d.rows) mat[index] *= inv[threadIdx.y]; } template<typename Real> __global__ static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; int32_cuda index_src = i + j*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j *d.stride; int32_cuda index_src = j + i*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_blocks(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = i + j * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.rows + q * d.cols] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_blocks_trans(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = j + i * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.cols + q * d.rows] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_mat_div_mat(const Real* A, const Real* B, const Real* C, Real* dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, a_index = i + j*stride_a, b_index = i + j*stride_b, c_index = i + j*stride_c; if (i < d.cols && j < d.rows) if (C[c_index] == 0) dst[index] = A[a_index]; else dst[index] = A[a_index] * B[b_index] / C[c_index]; } // Given a matrix input S (not packed!) and a lower-triangular matrix L, // this function does S = beta S + alpha * L^T L. This is used in PSD matrix inversion. // The i index is the row of the destination S and the j the column (although of // course the output is symmetric so it doesn't matter in a sense). 
The main point // of this is to make use of various symmetries and zero-ness. template<typename Real> __global__ static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim, Real *S, MatrixDim sdim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= sdim.rows || j > i) return; // this thread computes the dot-product of the i'th column of // L with the j'th column of L. The values we're multiplying // are only nonzero for row-index k greater or equal to // max(i, j), which equals i. Real sum = 0.0; for (int k = i; k < sdim.rows; k++) { int i_index = i + tdim.stride * k, j_index = j + tdim.stride * k; sum += T[i_index] * T[j_index]; } int output_index1 = i * sdim.stride + j, output_index2 = j * sdim.stride + i; S[output_index1] = alpha * sum + beta * S[output_index1]; S[output_index2] = alpha * sum + beta * S[output_index2]; } template<typename Real> __global__ static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*col[j] + beta*dst[index]; } template<typename Real> __global__ static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*row[i] + beta*dst[index]; } template<typename Real> __global__ static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*dmat.stride; int32_cuda index2 = i + j*dmask.stride; if ( i < dmat.cols && j < dmat.rows ) if(mask[index2] == 0) mat[index] = 0; } template<typename Real> __global__ static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim, const Real *mat2, int mat2_row_stride, int mat2_col_stride, const Real *vec, Real beta) { // Note from Dan: in this kernel, we make the x dimension correspond to the // row index and y to the column index. That was not always the case for // earlier kernels written by others. int i = blockIdx.x * blockDim.x + threadIdx.x; // row index int j = blockIdx.y * blockDim.y + threadIdx.y; // column index int index = i * mat_dim.stride + j, index2 = i * mat2_row_stride + j * mat2_col_stride; if (i < mat_dim.rows && j < mat_dim.cols) { mat[index] = alpha * mat2[index2] * vec[j] + beta * mat[index]; } } template<typename Real> __global__ static void _add_mat_mat_elements(Real *data, const Real *srcA_data, const Real *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda tgt_index = i + j*dim.stride; int32_cuda srcA_index = i + j*srcA_stride; int32_cuda srcB_index = i + j*srcB_stride; if (i < dim.cols && j < dim.rows) { data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index] + beta * data[tgt_index] ; } } /* * CuVector */ // very limited application! 
template<typename Real> __global__ static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2, Real param_3, int* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { Real ratio = a[i] / param_3; if ( ( ratio < 0.0 ) || ( ratio >= 1.01 )) { *flag = 1; return; } if ( ratio < param_1 ) { Real factor = ((param_1/ratio) > param_2) ? param_2 : (param_1/ratio); v[i] = v[i] / factor; } else if ( ratio > param_1 ) { Real factor = ((ratio/param_1) > param_2) ? param_2 : (ratio/param_1); v[i] = v[i] * factor; } } } template<typename Real> __global__ static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v_out[i] = (double) v_in[i]; } } // This kernel writes a copy of the vector "v_in" to each row of the matrix // "m_out". the dimension of v_in should be equal to the #columns of m_out. In // this kernel, following the new pattern, x corresponds to row-index and y to // column-index. template<typename Real> __global__ static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row index. int j = blockIdx.y * blockDim.y + threadIdx.y; // column index. if (i < d.rows && j < d.cols) { int index = i * d.stride + j; m_out[index] = v_in[j]; } } template<typename Real> __global__ static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if ( i < dim) { v_out[i] = (float) v_in[i]; } } template<typename Real> __global__ static void _vec_min(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= CU1DBLOCK) return; __shared__ Real row_data[CU1DBLOCK]; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real min = 1.0 / 0.0; // infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j < min) min = v_j; } row_data[i] = min; __syncthreads(); //get the sum *value = _min_reduce(row_data); } template<typename Real> __global__ static void _vec_max(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.y > 0) return; __shared__ Real row_data[CU1DBLOCK]; if(i >= CU1DBLOCK) return; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real max = -1.0 / 0.0; // -infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j > max) max = v_j; } row_data[i] = max; __syncthreads(); //get the sum *value = _max_reduce(row_data); } // _trace_mat_mat expects to be called with 1 blocks, each of dimension // CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x], // i.e. value[0 through 0]. 
template<typename Real, int num_blocks> __global__ static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return; int num_elements = dA.rows * dA.cols, num_threads = CU1DBLOCK * num_blocks; int block_size = (num_elements + num_threads - 1) / num_threads; int loop_start = i * block_size, loop_end = (i + 1) * block_size; if (loop_end > num_elements) loop_end = num_elements; Real sum = 0.0; for (int j = loop_start; j < loop_end; j++) { // for (int j = i; j < num_elements; j += num_threads) { int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is // col-index in A; in B, it's reversed. int index_A = col + row * dA.stride, index_B = row + col * B_stride; sum += A[index_A] * B[index_B]; } __shared__ Real row_data[CU1DBLOCK]; row_data[threadIdx.x] = sum; __syncthreads(); Real ans = _sum_reduce(row_data); if (threadIdx.x == 0) value[blockIdx.x] = ans; } // _trace_mat_mat_trans expects to be called with 4 blocks, each of dimension // CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x], // i.e. value[0 through 3]. template<typename Real, int num_blocks> __global__ static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return; int num_elements = dA.rows * dA.cols, num_threads = CU1DBLOCK * num_blocks; // int block_size = (num_elements + num_threads - 1) / num_threads; // int loop_start = i * block_size, loop_end = (i + 1) * block_size; // if (loop_end > num_elements) // loop_end = num_elements; Real sum = 0.0; // for (int j = loop_start; j < loop_end; j++) { for (int j = i; j < num_elements; j += num_threads) { int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is // col-index in A; in B, it's reversed. int index_A = col + row * dA.stride, index_B = col + row * B_stride; sum += A[index_A] * B[index_B]; } __shared__ Real row_data[CU1DBLOCK]; row_data[threadIdx.x] = sum; __syncthreads(); Real ans = _sum_reduce(row_data); if (threadIdx.x == 0) value[blockIdx.x] = ans; } // Adds diag(M N) to v, where M and N are matrices. We supply row_stride and // col_stride arguments for M and N, and swapping them allows us to transpose // those matrices. Note: we imagine row-major indexing here, just like Kaldi // and CBLAS (but unlike CUBLAS). // This kernel expects the blockDim to be (CU1DBLOCK, 1) and the // gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element. // threads_per_element should be a power of 2. template<typename Real> __global__ static void _add_diag_mat_mat( Real alpha, Real* v, int v_dim, const Real* M, int M_cols, int M_row_stride, int M_col_stride, const Real *N, int N_row_stride, int N_col_stride, int threads_per_element, Real beta) { // we actually assume blockDim.x == CU1DBLOCK here. // Each diagonal element of v is processed by "threads_per_element" threads. __shared__ Real temp_data[CU1DBLOCK]; int i = blockIdx.x * blockDim.x + threadIdx.x; int v_idx = i / threads_per_element, // v_idx is the index into v that we are supposed to sub_idx = i % threads_per_element; // add to; 0 <= sub_idx < threads_per_element tells // us which block of elements we sum up. 
if (v_idx >= v_dim) return; Real sum = 0.0; for (int j = sub_idx; j < M_cols; j += threads_per_element) { int M_index = v_idx * M_row_stride + j * M_col_stride, N_index = j * N_row_stride + v_idx * N_col_stride; sum += M[M_index] * N[N_index]; } temp_data[threadIdx.x] = sum; // start_idx = threadIdx.x - sub_idx; // start of the position in temp_data // that we want to sum up. // The following is a tree-based reduction of the elements of temp_data from // start_idx to start_idx + threads_per_element - 1; our own index is "sub_idx". __syncthreads(); int num_total_threads = threads_per_element; while (num_total_threads > 1) { int half_point = ((1 + num_total_threads) >> 1); if (sub_idx < half_point) { Real temp = 0.0; if (sub_idx + half_point < num_total_threads) { temp = temp_data[threadIdx.x + half_point]; } temp_data[threadIdx.x] += temp; } __syncthreads(); num_total_threads = half_point; } if (sub_idx == 0) { v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x]; } } template<typename Real> __global__ static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) v[i] = alpha * x[i] * y[i] + beta * v[i]; } template<typename Real> __global__ static void _copy_col_from_mat_df(double* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (double) mat[index]; } template<typename Real> __global__ static void _copy_col_from_mat_fd(float* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (float) mat[index]; } template<typename Real> __global__ static void _vec_apply_exp(Real* v, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v[i] = exp(v[i]); } } template<typename Real> __global__ static void _vec_apply_log(Real* v, Real* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { if (v[i] < 0) { *flag = 1; return; } v[i] = log(v[i]); } } template<typename Real> __global__ static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z, MatrixDim d, Real* z2, MatrixDim d2, Real* t) { int i = threadIdx.x; __shared__ Real tot_objf[CU1DBLOCK]; __shared__ Real tot_weight[CU1DBLOCK]; Real tmp_weight_sum = 0; Real tmp_tot_objf = 0; int size = s / CU1DBLOCK; //the least size in a loop (later part) int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i*size; loop_end = threshold + (i+1)*size; } for(int j = loop_start; j< loop_end; j++) { int m = (x + j)->row; //* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) ); int label = (x + j)->column; //*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int)); Real weight = (x + j)->weight; //*(Real*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) ) + 2 * sizeof(int)); tmp_weight_sum += weight; Real this_prob = *(z + m * d.stride + label); tmp_tot_objf += weight * log(this_prob); *(z2 + m * d2.stride + label ) += weight / this_prob;// there might be problems here.... 
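      // Added note: the update above accumulates the derivative of the
      // weighted log-probability objective, d/dp (weight * log p) = weight / p,
      // evaluated at p = z(m, label), into z2(m, label); the per-thread totals
      // tmp_tot_objf and tmp_weight_sum are reduced across the block further
      // below with _sum_reduce.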
} tot_objf[i] = tmp_tot_objf; tot_weight[i] = tmp_weight_sum; __syncthreads(); *t = _sum_reduce(tot_objf); __syncthreads(); *(t+1) = _sum_reduce(tot_weight); return; } template<typename Real> __global__ static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha, MatrixElement<Real>* x, int num_elements) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_elements) return; data[x[i].row * dim.stride + x[i].column] += alpha * x[i].weight; } template<typename Real> __global__ static void _cuda_matrix_add_indexed_values(MatrixDim dim, Real alpha, const Int32Pair* indices, const Real* x, int s, Real* data) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= s) return; int data_i = indices[i].first * dim.stride + indices[i].second; data[data_i] += alpha * x[i]; } template<typename Real> __global__ static void _matrix_lookup(const Real *data, MatrixDim dim, const Int32Pair *indices, int indices_size, Real *output) { int ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= indices_size) return; int data_ind = indices[ind].first * dim.stride + indices[ind].second; output[ind] = data[data_ind]; } template<typename Real> __global__ static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; //col int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; //row int32_cuda index_mat1 = i + j*mat1_dim.stride; int32_cuda index_mat2 = i + j*mat2_stride; int32_cuda index_mask = i + j*mask_stride; if (i < mat1_dim.cols && j < mat1_dim.rows) mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 1.0 : 0.0); } template<typename Real> __global__ static void _vec_sum(Real *v, Real *sum, int dim, int inc) { int i = threadIdx.x; __shared__ Real row_data[CU1DBLOCK]; if (i >= CU1DBLOCK) return; Real tmp_sum = 0; int size = dim / CU1DBLOCK; //the least size in a loop (later part) int threshold = dim - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i * size; loop_end = threshold + (i+1) * size; } for(int j = loop_start; j< loop_end; j++) { tmp_sum += v[j * inc]; } row_data[threadIdx.x] = tmp_sum; __syncthreads(); *sum = _sum_reduce(row_data); } template<typename Real> __global__ static void _pvec_sum(Real* v, Real* g, int dim, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int start = size * i; if (start >= dim) return; int end = start + size; if (end > dim) end = dim; __shared__ Real row_data[CU1DBLOCK]; Real sum = 0; for (int j = start; j < end; j++) sum += v[j]; row_data[threadIdx.x] = sum; __syncthreads(); g[blockIdx.x] = _sum_reduce(row_data); } template<typename Real> __global__ static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] < floor_val) { v[i] = floor_val; count[i] = 1; } else { count[i] = 0; } } } template<typename Real> __global__ static void _vec_apply_ceiling(Real *v, Real ceiling_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] > ceiling_val) { v[i] = ceiling_val; count[i] = 1; } else { count[i] = 0; } } } // Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index. // this is for no reason, really, I just happened to prefer this // at the time. 
// [dan]
template<typename Real>
__global__
static void _apply_pow(Real* mat, Real power, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int index = i * d.stride + j;
  if (i < d.rows && j < d.cols) {
    if (power == 1.0)
      return;
    if (power == 2.0) {
      mat[index] = mat[index] * mat[index];
    } else if (power == 0.5) {
      if (!(mat[index] >= 0.0))
        return;
      mat[index] = sqrt(mat[index]);
    } else {
      mat[index] = pow(mat[index], power);
    }
  }
}

template<typename Real>
__global__
static void _apply_pow_abs(Real* mat, Real power, bool include_sign, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int index = i * d.stride + j;
  if (i < d.rows && j < d.cols) {
    if (include_sign == true && mat[index] < 0) {
      if (power == 1.0)
        mat[index] = -std::abs(mat[index]);
      if (power == 2.0) {
        mat[index] = -mat[index] * mat[index];
      } else if (power == 0.5) {
        mat[index] = -sqrt(std::abs(mat[index]));
      } else {
        mat[index] = -pow(std::abs(mat[index]), power);
      }
    } else {
      if (power == 1.0)
        mat[index] = std::abs(mat[index]);
      if (power == 2.0) {
        mat[index] = mat[index] * mat[index];
      } else if (power == 0.5) {
        mat[index] = sqrt(std::abs(mat[index]));
      } else if (power < 0.0 && mat[index] == 0.0) {
        mat[index] = 0.0;
      } else {
        mat[index] = pow(std::abs(mat[index]), power);
      }
    }
  }
}

// Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is
// the col index.  this is for no reason, really, I just happened to prefer this
// at the time.  [dan]
template<typename Real>
__global__
static void _apply_heaviside(Real* mat, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int index = i * d.stride + j;
  if (i < d.rows && j < d.cols) {
    mat[index] = (mat[index] > 0.0 ? 1.0 : 0.0);
  }
}

template<typename Real>
__global__
static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int index = i + j * d.stride;
  if (i < d.cols && j < d.rows) {
    if (mat[index] < floor_val) mat[index] = floor_val;
  }
}

template<typename Real>
__global__
static void _copy_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder,
                       MatrixDim dst_dim, int src_stride) {
  // Note: in this kernel, the x dimension corresponds to rows and the y to columns,
  // as it will be going forward.
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i < dst_dim.rows && j < dst_dim.cols) {
    int index = reorder[j], dst_index = i * dst_dim.stride + j;
    if (index >= 0) {
      int src_index = i * src_stride + reorder[j];
      Real val = src[src_index];
      dst[dst_index] = val;
    } else {
      dst[dst_index] = 0.0;
    }
  }
}

template<typename Real>
__global__
static void _add_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder,
                      MatrixDim dst_dim, int src_stride) {
  // Note: in this kernel, the x dimension corresponds to rows and the y to columns,
  // as it will be going forward.
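  // Added note: as in _copy_cols above, reorder[j] selects the source column
  // that feeds destination column j.  The differences are that the value is
  // accumulated (+=) rather than assigned, and a negative reorder[j] simply
  // leaves dst unchanged instead of writing 0.0.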
int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int index = reorder[j], dst_index = i * dst_dim.stride + j; if (index >= 0) { int src_index = i * src_stride + reorder[j]; Real val = src[src_index]; dst[dst_index] += val; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { // Note: in this kernel, the x dimension corresponds to rows and the y to columns, // as it will be going forward. int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int index = reorder[i], dst_index = i * dst_dim.stride + j; if (index >= 0) { int src_index = reorder[i] * src_stride + j; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real *const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int dst_index = i * dst_dim.stride + j; if (src[i] != NULL) { dst[dst_index] = src[i][j]; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _copy_to_rows(Real* const* dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < src_dim.rows && j < src_dim.cols) { if (dst[i] != NULL) { dst[i][j] = src[i * src_dim.stride + j]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int dst_index = i * dst_dim.stride + j; if (reorder[i] >= 0) { int src_index = reorder[i] * src_stride + j; dst[dst_index] += alpha * src[src_index]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real *const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int dst_index = i * dst_dim.stride + j; if (src[i] != NULL) { dst[dst_index] += alpha * src[i][j]; } } } template<typename Real> __global__ static void _add_to_rows(Real alpha, Real* const* dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < src_dim.rows && j < src_dim.cols) { if (dst[i] != NULL) { dst[i][j] += alpha * src[i * src_dim.stride + j]; } } } template<typename Real> __global__ static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows ) { if (mat[index] > ceiling_val) mat[index] = ceiling_val; } } template<typename Real> __global__ static void _invert_elements(Real* data, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j*d.stride; if (i < d.cols && j < d.rows) data[index] = 1.0/data[index]; } // matrix-wise, do data = alpha * data + beta * A * B^T, // where B is a block matrix. 
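// Added note: each CuBlockMatrixData entry used below describes one block of
// the block matrix B: row_offset / col_offset give the block's position inside
// the full matrix, matrix_dim gives its rows, cols and stride, and matrix_data
// points at its elements.  In the transposed kernel the roles of rows and
// columns are swapped when reading the block, which is why BT_row_start is
// taken from col_offset and BT_num_rows from matrix_dim.cols.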
template<typename Real> __global__ static void _add_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } template<typename Real> __global__ static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[j]; int B_row_start = block_data.row_offset, B_col_start = block_data.col_offset, B_num_rows = block_data.matrix_dim.rows, B_num_cols = block_data.matrix_dim.cols, B_row_stride = block_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(block_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < B_num_cols; k++) { const Real *this_B_col = B_data + k; const Real *this_A_row = A_data + i * A_row_stride + B_row_start * A_col_stride; // this_A_row points to the element A[i][B_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < B_num_rows; l++) // l indexes rows of B. sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + B_col_start); data[index] = alpha * sum + beta * data[index]; } } // For a block matrix B, does B = alpha * C * D + beta * B. // the (x,y,z) indices are the block index, then the row // and column indices within the block. Note: transposition of C and D // is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride), // so it's invisible to this code. The num-cols and num-rows of C and D // are only provided to the extent that they are not already determined // by other quantities. template<typename Real> __global__ static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks, const Real *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const Real *D_data, int D_row_stride, int D_col_stride, Real alpha, Real beta) { int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B. 
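  // Added note (illustrative): element (r, k) of C is read below as
  // C_data[r * C_row_stride + k * C_col_stride], so a plain row-major C with
  // leading dimension ld would be passed with C_row_stride == ld and
  // C_col_stride == 1, while passing C_row_stride == 1 and C_col_stride == ld
  // makes the same code read C as if it were transposed; D is handled the same
  // way through D_row_stride / D_col_stride.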
int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block if (b >= num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[b]; if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols) return; // we're outside the dimensions of the b'th block. // B_elem is the element of B we're writing to. Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data) + i * block_data.matrix_dim.stride + j; Real B_val = *B_elem; // B_row and B_col are the (row, col) index into the full matrix B. int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j; const Real *C_row_data = C_data + C_row_stride * B_row, *D_col_data = D_data + D_col_stride * B_col; Real sum = 0.0; for (int k = 0; k < C_num_cols; k++) { sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride]; } *B_elem = alpha * sum + beta * B_val; } template<typename Real> __global__ static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } // Since this is a newer kernel, x is the row-index and y is the // column-index. 
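// Added note: for _sum_column_ranges below, indices[col] holds a half-open
// range [first, second) of source columns, so that
//   data(row, col) = sum of src_data(row, c) for c in [first, second).
// For example (illustrative only), indices[0] = {0, 3} makes output column 0
// the sum of source columns 0, 1 and 2 of the same row.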
template<typename Real> __global__ static void _sum_column_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indices) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col, src_start_index = row * src_dim.stride + indices[col].first, src_end_index = row * src_dim.stride + indices[col].second; Real sum = 0.0; for (int index = src_start_index; index < src_end_index; index++) sum += src_data[index]; data[dst_index] = sum; } template<typename Real> __global__ static void _add_row_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indexes) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col; int src_index_start = indexes[row].first, src_index_end = indexes[row].second; for (int row_index = src_index_start; row_index < src_index_end; row_index++) data[dst_index] += src_data[row_index * src_dim.stride + col]; } template<typename Real> __global__ static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; // compute the function y[index] = log(1 + exp(x[index])) if(i < d.cols && j < d.rows) { Real val = x[src_index], result; if (val >= 10.0) result = val; // function approaches y=x as x gets large else result = log1p(exp(val)); y[dst_index] = result; } } template<typename Real> __global__ static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; Real tmp = 0; int src_begin_index = i * group_size + j * src_stride; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { tmp += pow(std::abs(x[src_index]), power); } tmp = pow(tmp, Real(1.0 / power)); if (!isnan(tmp)) { y[dst_index] = tmp; } else { Real max_value = x[src_begin_index], min_value = max_value; for (int src_index = src_begin_index + 1; src_index < src_end_index; src_index ++) { if (x[src_index] > max_value) max_value = x[src_index]; if (x[src_index] < min_value) min_value = x[src_index]; } tmp = 0.0; Real max_abs_value = (max_value > -min_value ? 
max_value : -min_value); // let max_value be the // largest abs(value) if (max_abs_value == 0) { y[dst_index] = 0.0; } else { for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { Real x_scaled = x[src_index] / max_abs_value; tmp += pow(std::abs(x_scaled), Real(power)); } y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value; } } } } template<typename Real> __global__ static void _group_max(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; int src_begin_index = i * group_size + j * src_stride; Real max_value = -1e20; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { if (!isnan(x[src_index]) && x[src_index] > max_value) max_value = x[src_index]; } y[dst_index] = max_value; } } /* * cu:: */ template<typename Real> __global__ static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; if(i < d.cols && j < d.rows) { Real res = 1.0 / (1.0 + exp(-x[src_index])); y[dst_index] = res; } } template<typename Real> __global__ static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = y[y_index]*(1.0-y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j * src_stride; if(i < d.cols && j < d.rows) { Real exp_2x = exp(2.0*x[src_index]); Real res; if(isinf(exp_2x)) { res = 1.0; } else { res = (exp_2x - 1.0) / (exp_2x + 1.0); } y[dst_index] = res; } } template<typename Real> __global__ static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = (1.0 - y[y_index]*y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; //copy input to aux aux[threadIdx.x] = x[threadIdx.x+j*d.stride]; for(int i=1; i<steps; ++i) { if(threadIdx.x+i*THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride]) aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride]; } //get the maximum value int nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. 
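    // Added note: halfPoint rounds nTotalThreads/2 upwards, so odd counts are
    // reduced correctly (e.g. 7 -> 4 -> 2 -> 1), and the
    // "threadIdx.x + halfPoint < nTotalThreads" guard below keeps threads from
    // pulling in values that lie outside the active part of aux[].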
if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x+halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real max = aux[0]; __syncthreads(); // subtract max, apply exp, sum up... y[threadIdx.x+j*d.stride] = exp(x[threadIdx.x+j*d.stride] - max); aux[threadIdx.x] = y[threadIdx.x+j*d.stride]; for(int i=1; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = exp(x[threadIdx.x+i*THREADS+j*d.stride] - max); aux[threadIdx.x] += y[threadIdx.x+i*THREADS+j*d.stride]; } } nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real sum = aux[0]; __syncthreads(); //normalize by sum... for(int i=0; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = y[threadIdx.x+i*THREADS+j*d.stride] / sum; } } } template<typename Real> __global__ static void _log_softmax_reduce(Real *y, const Real *x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; // Maximum step 1: loads input data to <aux>. If <d.cols> is larger than // <blockDim.x>, then we do a first pass filtering and only // keep a <blockDim.x> size array. aux[threadIdx.x] = x[threadIdx.x + j * d.stride]; for (int i = 1; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x + i * THREADS + j * d.stride]) aux[threadIdx.x] = x[threadIdx.x + i * THREADS + j * d.stride]; } // Maximum step 2: the standard max reduce. int nTotalThreads = THREADS; __syncthreads(); while (nTotalThreads > 1) { int halfPoint = ((1 + nTotalThreads) >> 1); if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x + halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); } Real max = aux[0]; __syncthreads(); // Log sum step 1: substracts max, and takes exponentials. y[threadIdx.x + j * d.stride] = x[threadIdx.x + j * d.stride] - max; aux[threadIdx.x] = exp(y[threadIdx.x + j * d.stride]); for (int i = 1; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols) { y[threadIdx.x + i * THREADS + j * d.stride] = x[threadIdx.x + i * THREADS + j * d.stride] - max; aux[threadIdx.x] += exp(y[threadIdx.x + i * THREADS + j * d.stride]); } } // Log sum step 2: comptes summation and then takes logarithm. nTotalThreads = THREADS; __syncthreads(); while (nTotalThreads > 1) { int halfPoint = ((1 + nTotalThreads) >> 1); if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); } Real log_sum = log(aux[0]); __syncthreads(); // Computes log softmax. 
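  // Added note: at this point each y(j, c) already holds x(j, c) - max for its
  // row, and log_sum is log(sum_c exp(x(j, c) - max)), so the subtraction in
  // the loop below yields the numerically stable form
  //   log_softmax(x_c) = (x_c - max) - log(sum_{c'} exp(x_{c'} - max)).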
for (int i = 0; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols) { y[threadIdx.x + i * THREADS + j * d.stride] -= log_sum; } } } template<typename Real> __global__ static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = i % d_in.cols; int32_cuda src_row = j + off[i / d_in.cols]; if(src_row < 0) src_row = 0; if(src_row >= d_in.rows) src_row = d_in.rows-1; y[index] = x[src_col + src_row*d_in.stride]; } } template<typename Real> __global__ static void _take_mean(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index1 = i + j * d_in.stride; int32_cuda index2 = j + i * d_in.stride; if (i <= j && j < d_in.rows) { int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = 0.5 * (x[index1] + x[index2]); } } template<typename Real> __global__ static void _take_lower(const Real* x, Real* y, MatrixDim d_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j > i || i >= d_in.rows) return; int index = i * d_in.stride + j; Real val = x[index]; int index_sp = (i * (i+1) / 2) + j; y[index_sp] = val; } template<typename Real> __global__ static void _take_upper(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j < i || j >= d_in.rows) return; int32_cuda index = i * d_in.stride + j; int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = x[index]; } template<typename Real> __global__ static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1) * (i+2) / 2) - 1; if (i < dim) { y[i] = x[index]; } } template<typename Real> __global__ static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dim.rows && j < dim.cols) { int dst_index = i * dim.stride + j, src_index; if (j <= i) { // no transpose src_index = (i * (i+1) / 2) + j; } else { // transpose. 
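      // Added note: the packed symmetric storage read here keeps only the
      // lower triangle in row-major order, so element (r, c) with r >= c lives
      // at offset r*(r+1)/2 + c (the same layout _take_lower above produces).
      // For the upper triangle (j > i) we therefore read the mirrored element
      // (j, i), which is what the next line computes.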
src_index = (j * (j+1) / 2) + i; } y[dst_index] = x[src_index]; } } template<typename Real> __global__ static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = copy_from[i]; if(src_col >= 0 && src_col < d_in.cols) { y[index] = x[src_col + j*d_in.stride]; } else { y[index] = 1.0/0.0; } } } template<typename Real> __global__ static void _one(Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { x[i] = 1.0; } } template<typename Real> __global__ static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_row = copy_from[j]; y[index] = x[i + src_row*d_in.stride]; } } template<typename Real> __global__ static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, grad_index = i + j*stride_grad; if (i < d.cols && j < d.rows) { if(wei[index]==0.0) return; //skip L1 if zero weight! Real l1_signed = l1; if(wei[index] < 0.0) //flip sign l1_signed = -l1; Real before = wei[index]; Real after = wei[index] -lr*grad[grad_index] -l1_signed;//simulate update if((after > 0.0) ^ (before > 0.0)) { //sign changed? wei[index] = 0.0; grad[grad_index] = 0.0; } else { wei[index] -= l1_signed; } } } template<typename Real> __global__ static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(blockIdx.x > 0) return; if(blockDim.y != 1) return; __shared__ Real value[CU1DBLOCK]; __shared__ int32_cuda index[CU1DBLOCK]; //copy to shared memory value[threadIdx.x] = mat[i+j*d.stride]; index[threadIdx.x] = threadIdx.x; __syncthreads(); //get the id of the max value int32_cuda out_max = _max_id_reduce(value, index); __syncthreads(); //see if it's bigger value if(threadIdx.x == 0) { if(vec_val[j] <= mat[out_max+j*d.stride]) { vec_val[j] = mat[out_max+j*d.stride]; vec_id[j] = voff+out_max; } } } template<typename Real> __global__ static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(i>0) return; if(j<d.rows) { int32_cuda index = vec_tgt[j] + j*d.stride; Real value = mat_net_out[index]; if(value < 1e-20) value = 1e-20; vec_log_post[j] = log(value); mat_net_out[index] -= 1.0; } } /*********************************************************************** * ANSI-C wrappers of CUDA kernels */ /* * "int32" */ void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } /* * "float" */ /* * CuMatrix */ void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, 
MatrixDim dimA) {hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *vec, const float *mat2, int mat2_row_stride, int mat2_col_stride, float beta) { hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaF_transpose_matrix(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _transpose_matrix), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d); } void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power, bool include_sign, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d); } void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_add_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows_direct(dim3 Gr, dim3 Bl, float* dst, const float* const* src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, dst_dim); } void cudaF_copy_to_rows_direct(dim3 Gr, dim3 Bl, float* const* dst, const float* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _copy_to_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, src_dim); } void cudaF_add_rows(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, dst_dim, src_stride); } void cudaF_add_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* const* src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, dst_dim); } void cudaF_add_to_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* const* dst, const float* src, MatrixDim src_dim) { 
hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, src_dim); } void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d); } void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d); } void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaF_calc_pnorm_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size, float power) { hipLaunchKernelGGL(( _calc_pnorm_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size, power); } void cudaF_calc_group_max_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _calc_group_max_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size); } void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) { 
hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d); } void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } else { hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } } void cudaF_add_mat_blocks(dim3 Gr, dim3 Bl, float alpha, const float* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_blocks_trans), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { hipLaunchKernelGGL(( _add_mat_blocks), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaF_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B, const float *C, float *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { hipLaunchKernelGGL(( _add_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d, stride_a, stride_b, stride_c); } void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T, MatrixDim tdim, float *S, MatrixDim sdim) { hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim); } void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d); } void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d); } void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *mat2, int mat2_row_stride, int mat2_col_stride, const float *vec, float beta) { hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) { hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... 
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask); } /* * CuVector */ void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig, float changed) { hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed); } void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a, float param_1, float param_2, float param_3, int* flag, int dim) { hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim); } void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) { hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim); } void cudaF_vec_min(const float* v, float* value, int dim) { hipLaunchKernelGGL(( _vec_min), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaF_vec_max(const float* v, float* value, int dim) { hipLaunchKernelGGL(( _vec_max), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaF_trace_mat_mat_trans(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { hipLaunchKernelGGL(( _trace_mat_mat_trans<float,4>) , dim3(4),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value); } void cudaF_trace_mat_mat(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { hipLaunchKernelGGL(( _trace_mat_mat<float,2>) , dim3(2),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value); } void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M, int M_cols, int M_row_stride, int M_col_stride, const float *N, int N_row_stride, int N_col_stride, int threads_per_element, float beta) { hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) { hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim); } void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_sum), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc); } void cudaF_pvec_sum(int Gr, int Bl, float* v, float* pvec_sum, int dim, int size) { hipLaunchKernelGGL(( _pvec_sum), dim3(Gr),dim3(Bl), 0, 0, v, pvec_sum, dim, size); } void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, float alpha, MatrixElement<float>* x, int num_elements) { hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, num_elements); } void cudaF_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, float alpha, const Int32Pair* indices, const float* x, int s, float* data) { hipLaunchKernelGGL(( _cuda_matrix_add_indexed_values), dim3(Gr), dim3(Bl), 0, 0, dim, alpha, indices, x, s, data); } void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s, const float* z, MatrixDim d, float* z2, MatrixDim d2, float* t) { hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t); } void 
cudaD_comp_obj_deriv(dim3 Gr,dim3 Bl, MatrixElement<double>* x, int s, const double* z, MatrixDim d, double* z2, MatrixDim d2, double* t) { hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t); } void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst, const float *src, int dim) { hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim); } void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim); } void cudaF_vec_apply_ceiling(int Gr, int Bl, float* v, float ceiling_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, v, ceiling_val,count,dim); } void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) { hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim); } void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) { hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim); } void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) { hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d); } void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d, const float *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, float alpha, float beta, int B_trans) { if (B_trans) { hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const float *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const float *D_data, int D_row_stride, int D_col_stride, float alpha, float beta) { hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaF_soft_hinge (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size, float power) { hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power); } void cudaF_group_max(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _group_max), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_diff_tanh (dim3 Gr, dim3 
Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_log_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _log_softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in); } void cudaF_one(int Gr, int Bl, float* x, int dim) { hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim); } void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim dim) { hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x, y, dim); } void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d, int stride_grad) { hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad); } void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d); } void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) { hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d); } void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) { hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in); } void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const float* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indices) { hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices); } void cudaF_add_row_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, 
const float *src_data, MatrixDim src_dim, const Int32Pair *indexes) { hipLaunchKernelGGL(( _add_row_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indexes); } void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim, const Int32Pair *indices, int indices_size, float *output) { hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output); } void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1, const float *mat2, float *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* * "double" */ /* * CuMatrix */ void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *vec, const double *mat2, int mat2_row_stride, int mat2_col_stride, double beta) { hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaD_transpose_matrix(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _transpose_matrix), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d); } void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power, bool include_sign, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d); } void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaD_add_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void 
cudaD_copy_rows_direct(dim3 Gr, dim3 Bl, double* dst, const double* const* src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, dst_dim); } void cudaD_copy_to_rows_direct(dim3 Gr, dim3 Bl, double* const* dst, const double* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _copy_to_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, src_dim); } void cudaD_add_rows(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, dst_dim, src_stride); } void cudaD_add_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* const* src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, dst_dim); } void cudaD_add_to_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* const* dst, const double* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, src_dim); } void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d); } void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d); } void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void 
cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaD_calc_pnorm_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size, double power) { hipLaunchKernelGGL(( _calc_pnorm_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size, power); } void cudaD_calc_group_max_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _calc_group_max_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size); } void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) { hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d); } void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } else { hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } } void cudaD_add_mat_blocks(dim3 Gr, dim3 Bl, double alpha, const double* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_blocks_trans), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { hipLaunchKernelGGL(( _add_mat_blocks), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaD_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A, const double *B, const double *C, double *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { hipLaunchKernelGGL(( _add_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d,stride_a,stride_b,stride_c); } void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta, const double* T, MatrixDim tdim, double *S, MatrixDim sdim) { hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim); } void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d); } void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d); } void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *mat2, int mat2_row_stride, int mat2_col_stride, const double *vec, double beta) { hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) { hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... 
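// [Editor's note, illustrative only - not part of the original source.]
// The cudaD_add_mat / cudaD_add_mat_blocks wrappers above take an A_trans flag
// and dispatch to the _add_mat_trans / _add_mat_blocks_trans kernels, so a
// caller can add a transposed source without materialising the transpose.
// Hypothetical call (grid/block sizing omitted):
//   cudaD_add_mat(Gr, Bl, 1.0, src, dst, d, src_stride, 1 /* A_trans */);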
void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask); } /* * CuVector */ void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig, double changed) { hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed); } void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a, double param_1, double param_2, double param_3, int* flag, int dim) { hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim); } void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) { hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim); } void cudaD_vec_min(const double* v, double* value, int dim) { hipLaunchKernelGGL(( _vec_min), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaD_vec_max(const double* v, double* value, int dim) { hipLaunchKernelGGL(( _vec_max), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaD_trace_mat_mat_trans(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { hipLaunchKernelGGL(( _trace_mat_mat_trans<double,4>) , dim3(4),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value); } void cudaD_trace_mat_mat(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { hipLaunchKernelGGL(( _trace_mat_mat<double,2>) , dim3(2),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value); } void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M, int M_cols, int M_row_stride, int M_col_stride, const double *N, int N_row_stride, int N_col_stride, int threads_per_element, double beta) { hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) { hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim); } void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const double* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_sum), dim3(Gr),dim3(Bl), 0, 0, v,value,dim,inc); } void cudaD_pvec_sum(int Gr, int Bl, double* v, double* pvec_sum, int dim, int size) { hipLaunchKernelGGL(( _pvec_sum), dim3(Gr),dim3(Bl), 0, 0, v,pvec_sum,dim,size); } void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, double alpha, MatrixElement<double>* x, int num_elements) { hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, num_elements); } void cudaD_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, 
double alpha, const Int32Pair* indices, const double* x, int s, double* data) { hipLaunchKernelGGL(( _cuda_matrix_add_indexed_values), dim3(Gr), dim3(Bl), 0, 0, dim, alpha, indices, x, s, data); } void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst, const double *src, int dim) { hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim); } void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim); } void cudaD_vec_apply_ceiling(int Gr, int Bl, double* v, double ceiling_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, v,ceiling_val,count,dim); } void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) { hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim); } void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) { hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim); } void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) { hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d); } void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d, const double *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, double alpha, double beta, int B_trans) { if (B_trans) { hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const double *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const double *D_data, int D_row_stride, int D_col_stride, double alpha, double beta) { hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaD_soft_hinge (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size, double power) { hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power); } void cudaD_group_max(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _group_max), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_diff_tanh (dim3 Gr, dim3 
Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_log_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _log_softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in); } void cudaD_one(int Gr, int Bl, double* x, int dim) { hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim); } void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_out) { hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x,y,d_out); } void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d,int stride_grad) { hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad); } void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d); } void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) { hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d); } void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) { hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in); } void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indices) { hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices); } void cudaD_add_row_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indexes) { hipLaunchKernelGGL(( _add_row_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indexes); } void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim, const Int32Pair *indices, int indices_size, double *output) { hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 
0, data, dim, indices, indices_size, output); } void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1, const double *mat2, double *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* Some conversion kernels for which it's more convenient to not name them F or D. */ void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_smat_ff(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_df(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( 
_copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd_trans(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cudaF_trace_mat_smat(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, float* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaF_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, float* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); }
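// [Editor's note: illustrative sketch appended by the editor, not part of the
// original file.] A minimal example of how a caller might size the 2D grid for
// the element-wise wrappers above. The 16x16 block size is an assumption
// (Kaldi's CU2DBLOCK), and example_launch_cudaD_scale is a hypothetical
// helper, not a library API.
static inline void example_launch_cudaD_scale(double *mat, double value,
                                              MatrixDim d) {
  dim3 Bl(16, 16);                       // x spans columns, y spans rows
  dim3 Gr((d.cols + Bl.x - 1) / Bl.x,    // ceil(cols / 16) blocks in x
          (d.rows + Bl.y - 1) / Bl.y);   // ceil(rows / 16) blocks in y
  cudaD_scale(Gr, Bl, mat, value, d);    // wrapper defined earlier in this file
}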
99d6df063ade9f6eff4021cb5f5b862f3f8a23b6.cu
// cudamatrix/cu-kernels.cu // Copyright 2009-2012 Karel Vesely // 2013 Ehsan Variani // 2013 Johns Hopkins University (author: Daniel Povey) // 2013 Hainan Xu // 2013 Xiaohui Zhang // 2013-2015 Guoguo Chen // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. // In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers #include <cfloat> #include "cudamatrix/cu-kernels-ansi.h" /*********************************************************************** * Generic __device__ functions */ template<typename Real> __device__ static Real _sum_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (sum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x >= halfPoint) { // was < // Get the shared value stored by another thread Real temp = 0.0; if(threadIdx.x < nTotalThreads) { // was +halfPoint temp = buffer[threadIdx.x]; // was +halfPoint } buffer[threadIdx.x - halfPoint] += temp; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } template<typename Real> __device__ static Real _min_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (min) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp < buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two } // the result return buffer[0]; } template<typename Real> __device__ static Real _max_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (max) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } template<typename Real> __device__ static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (get index of maximum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. 
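// [Editor's note, illustrative only.] The reduction helpers above all follow
// the same tree pattern: on each pass, buffer[k] is combined with
// buffer[k + halfPoint] for k < halfPoint, then the active count is halved.
// Worked example, assuming blockDim.x == 8:
//   pass 1: halfPoint = 4, pairs (0,4) (1,5) (2,6) (3,7)
//   pass 2: halfPoint = 2, pairs (0,2) (1,3)
//   pass 3: halfPoint = 1, pair  (0,1)  -> result ends up in slot 0
// With an odd count, halfPoint rounds up ((1+n)>>1) and the unpaired slot is
// simply carried forward to the next pass.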
if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread Real temp = -1e20; if(threadIdx.x+halfPoint < nTotalThreads) { temp = val[idx[threadIdx.x + halfPoint]]; } if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return idx[0]; } /*********************************************************************** * CUDA kernels * the functions are templated to have the float/double operations */ /* * CuMatrix */ template<typename Real> __global__ static void _copy_low_upp(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i <= j || i >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } template<typename Real> __global__ static void _copy_upp_low(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j <= i || j >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } // mat += diag(vec) * mat2. template<typename Real> __global__ static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim, const Real *vec, const Real *mat2, int mat2_row_stride, int mat2_col_stride, Real beta) { // Note from Dan: in this kernel, we make the x dimension correspond to the // row index and y to the column index. That was not always the case for // earlier kernels written by others. int i = blockIdx.y * blockDim.y + threadIdx.y; // row index int j = blockIdx.x * blockDim.x + threadIdx.x; // column index int index = i * mat_dim.stride + j, index2 = i * mat2_row_stride + j * mat2_col_stride; if (i < mat_dim.rows && j < mat_dim.cols) { mat[index] = alpha * vec[i] * mat2[index2] + beta * mat[index]; } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (i * (i+1) / 2) + j; int32_cuda index_A = i * dmat.stride + j; if (j <= i) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // transpose the indices used to index the source TpMatrix. if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (j * (j+1) / 2) + i; int32_cuda index_A = i * dmat.stride + j; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } // for this kernel, following the newer pattern, the x-dim is the row-index, the // y-dim is the col-index. template<typename Real, typename OtherReal> __global__ static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index. 
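// [Editor's note, illustrative only.] The _copy_from_tp / _copy_from_tp_trans
// kernels above read from a packed lower-triangular (TpMatrix) layout, where
// row i starts at offset i*(i+1)/2. Worked example for element (i=2, j=1):
// the packed order is (0,0) (1,0) (1,1) (2,0) (2,1) (2,2), so
// index_B = 2*3/2 + 1 = 4, which is indeed the position of (2,1).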
int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = j + i * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } // for this kernel, the x-dim is the row-index at the output, the y-dim is the // col-index at the output template<typename Real, typename OtherReal> __global__ static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index out int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index out int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = i + j * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat(Real* mat_out, const MatrixElement<OtherReal>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; int data_index = smat_in[smat_index].row * d_out.stride + smat_in[smat_index].column; mat_out[data_index] = smat_in[smat_index].weight; } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat_trans(Real* mat_out, const MatrixElement<OtherReal>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; int data_index = smat_in[smat_index].column * d_out.stride + smat_in[smat_index].row; mat_out[data_index] = smat_in[smat_index].weight; } template<typename Real> __global__ static void _trace_mat_smat_trans(const Real* mat_in, const MatrixElement<Real>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, Real* trace_vec_out) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= smat_d_in) return; int mat_index = smat_in[smat_index].row * mat_d_in.stride + smat_in[smat_index].column; trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight; } template<typename Real> __global__ static void _trace_mat_smat(const Real* mat_in, const MatrixElement<Real>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, Real* trace_vec_out) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= smat_d_in) return; int mat_index = smat_in[smat_index].column * mat_d_in.stride + smat_in[smat_index].row; trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight; } template<typename Real> __global__ static void _transpose_matrix(Real* mat, MatrixDim d) { // Transposes a square matrix in-place. int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j >= i || i >= d.rows) { return; } // Only half the threads act. 
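// [Editor's note, illustrative only.] In _transpose_matrix, each surviving
// thread (j < i) swaps the element pair (i,j) and (j,i), so every off-diagonal
// pair is exchanged exactly once and the diagonal is left untouched; this is
// why the transpose can be done in place, but only for square matrices.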
int32_cuda index_a = j + i * d.stride, index_b = i + j * d.stride; Real a = mat[index_a], b = mat[index_b]; mat[index_a] = b; mat[index_b] = a; } template<typename Real> __global__ static void _apply_exp(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if ( i < d.cols && j < d.rows ) { mat[index] = exp(mat[index]); } } template<typename Real> __global__ static void _scale_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value * mat[index]; } } template<typename Real> __global__ static void _set_diag(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = i + i*d.stride; if ( i < d.rows && i < d.cols) { mat[index] = value; } } template<typename Real> __global__ static void _set_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value; } } template<typename Real> __global__ static void _add_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = mat[index] + value; } } template<typename Real> __global__ static void _set_const(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = value; } template<typename Real> __global__ static void _set_zero_above_diag(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < i) mat[index] = 0.0; } template<typename Real> __global__ static void _add(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] + value; } template<typename Real> __global__ static void _scale(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] * value; } template<typename Real> __global__ static void _apply_log(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = log(mat[index]); } template<typename Real> __global__ static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] * A[src_index]; } template<typename Real> __global__ static void _div_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + 
j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] / A[src_index]; } template<typename Real> __global__ static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if ( i < dst_d.cols && j < dst_d.rows ) { Real a = mat[dst_index], b = A[src_index]; mat[dst_index] = (a > b ? a : b); } } template<typename Real> __global__ static void _vec_mul_elements(Real* v, const Real* a, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) v[i] = v[i] * a[i]; } template<typename Real> __global__ static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[i]; } template<typename Real> __global__ static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[j]; } template<typename Real> __global__ static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride; int src_index = i / group_size + j * src_stride; y[dst_index] *= x[src_index]; } } /// y is the derivative we will output; vec is the input we're computing /// the group p-norm on, "norm" is the previously computed group p-norm. template<typename Real> __global__ static void _calc_pnorm_deriv(Real *deriv, const Real *vec, const Real *norm, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. norm_element = norm[src_index]; // this is the pnorm Real vec_element_sign = (vec_element > 0 ? 1 : -1); Real ans; if (norm_element <= 0.0) ans = 0.0; // The derivative is either zero or undefined at the origin. else ans = vec_element_sign * pow(std::abs(vec_element), power - 1) * pow(norm_element, 1 - power); deriv[dst_index] = ans; } } /// deriv is the derivative we will output; vec is the input we're computing /// the group max on, "maxv" is the previously computed group max. template<typename Real> __global__ static void _calc_group_max_deriv(Real *deriv, const Real *vec, const Real *maxv, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. max_element = maxv[src_index]; // this is the max value Real ans = (max_element == vec_element ? 1.0 : 0.0); deriv[dst_index] = ans; } } /// Set each element to y = (x == orig ? changed : x). 
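// [Editor's note, illustrative only.] Derivation behind _calc_pnorm_deriv
// above: with norm = (sum_j |x_j|^p)^(1/p),
//   d(norm)/dx_i = (1/p) * (sum_j |x_j|^p)^(1/p - 1) * p * |x_i|^(p-1) * sign(x_i)
//                = sign(x_i) * |x_i|^(p-1) * norm^(1-p),
// which is exactly vec_element_sign * pow(|vec_element|, power-1) *
// pow(norm_element, 1-power) in the kernel; the result is defined to be 0
// when the group norm is 0.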
template<typename Real> __global__ static void _replace_value(Real *vec, int dim, Real orig, Real changed) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) if (vec[i] == orig) vec[i] = changed; } template<typename Real> __global__ static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (j >= d.rows ) return; //invert divider in shared memory __shared__ Real inv[16]; if(threadIdx.x==0) { inv[threadIdx.y] = 1.0/vec_div[j]; } __syncthreads(); //multiply elements if (i < d.cols && j < d.rows) mat[index] *= inv[threadIdx.y]; } template<typename Real> __global__ static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; int32_cuda index_src = i + j*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j *d.stride; int32_cuda index_src = j + i*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_blocks(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = i + j * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.rows + q * d.cols] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_blocks_trans(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = j + i * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.cols + q * d.rows] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_mat_div_mat(const Real* A, const Real* B, const Real* C, Real* dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, a_index = i + j*stride_a, b_index = i + j*stride_b, c_index = i + j*stride_c; if (i < d.cols && j < d.rows) if (C[c_index] == 0) dst[index] = A[a_index]; else dst[index] = A[a_index] * B[b_index] / C[c_index]; } // Given a matrix input S (not packed!) and a lower-triangular matrix L, // this function does S = beta S + alpha * L^T L. This is used in PSD matrix inversion. // The i index is the row of the destination S and the j the column (although of // course the output is symmetric so it doesn't matter in a sense). 
The main point // of this is to make use of various symmetries and zero-ness. template<typename Real> __global__ static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim, Real *S, MatrixDim sdim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= sdim.rows || j > i) return; // this thread computes the dot-product of the i'th column of // L with the j'th column of L. The values we're multiplying // are only nonzero for row-index k greater or equal to // max(i, j), which equals i. Real sum = 0.0; for (int k = i; k < sdim.rows; k++) { int i_index = i + tdim.stride * k, j_index = j + tdim.stride * k; sum += T[i_index] * T[j_index]; } int output_index1 = i * sdim.stride + j, output_index2 = j * sdim.stride + i; S[output_index1] = alpha * sum + beta * S[output_index1]; S[output_index2] = alpha * sum + beta * S[output_index2]; } template<typename Real> __global__ static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*col[j] + beta*dst[index]; } template<typename Real> __global__ static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*row[i] + beta*dst[index]; } template<typename Real> __global__ static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*dmat.stride; int32_cuda index2 = i + j*dmask.stride; if ( i < dmat.cols && j < dmat.rows ) if(mask[index2] == 0) mat[index] = 0; } template<typename Real> __global__ static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim, const Real *mat2, int mat2_row_stride, int mat2_col_stride, const Real *vec, Real beta) { // Note from Dan: in this kernel, we make the x dimension correspond to the // row index and y to the column index. That was not always the case for // earlier kernels written by others. int i = blockIdx.x * blockDim.x + threadIdx.x; // row index int j = blockIdx.y * blockDim.y + threadIdx.y; // column index int index = i * mat_dim.stride + j, index2 = i * mat2_row_stride + j * mat2_col_stride; if (i < mat_dim.rows && j < mat_dim.cols) { mat[index] = alpha * mat2[index2] * vec[j] + beta * mat[index]; } } template<typename Real> __global__ static void _add_mat_mat_elements(Real *data, const Real *srcA_data, const Real *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda tgt_index = i + j*dim.stride; int32_cuda srcA_index = i + j*srcA_stride; int32_cuda srcB_index = i + j*srcB_stride; if (i < dim.cols && j < dim.rows) { data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index] + beta * data[tgt_index] ; } } /* * CuVector */ // very limited application! 
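// [Editor's note, illustrative only.] Quick reference for the two similarly
// named kernels: _add_diag_vec_mat (earlier) computes
//   mat = beta*mat + alpha * diag(vec) * mat2   (row i of mat2 scaled by vec[i]),
// while _add_mat_diag_vec just above computes
//   mat = beta*mat + alpha * mat2 * diag(vec)   (column j of mat2 scaled by vec[j]).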
template<typename Real> __global__ static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2, Real param_3, int* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { Real ratio = a[i] / param_3; if ( ( ratio < 0.0 ) || ( ratio >= 1.01 )) { *flag = 1; return; } if ( ratio < param_1 ) { Real factor = ((param_1/ratio) > param_2) ? param_2 : (param_1/ratio); v[i] = v[i] / factor; } else if ( ratio > param_1 ) { Real factor = ((ratio/param_1) > param_2) ? param_2 : (ratio/param_1); v[i] = v[i] * factor; } } } template<typename Real> __global__ static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v_out[i] = (double) v_in[i]; } } // This kernel writes a copy of the vector "v_in" to each row of the matrix // "m_out". the dimension of v_in should be equal to the #columns of m_out. In // this kernel, following the new pattern, x corresponds to row-index and y to // column-index. template<typename Real> __global__ static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row index. int j = blockIdx.y * blockDim.y + threadIdx.y; // column index. if (i < d.rows && j < d.cols) { int index = i * d.stride + j; m_out[index] = v_in[j]; } } template<typename Real> __global__ static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if ( i < dim) { v_out[i] = (float) v_in[i]; } } template<typename Real> __global__ static void _vec_min(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= CU1DBLOCK) return; __shared__ Real row_data[CU1DBLOCK]; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real min = 1.0 / 0.0; // infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j < min) min = v_j; } row_data[i] = min; __syncthreads(); //get the sum *value = _min_reduce(row_data); } template<typename Real> __global__ static void _vec_max(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.y > 0) return; __shared__ Real row_data[CU1DBLOCK]; if(i >= CU1DBLOCK) return; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real max = -1.0 / 0.0; // -infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j > max) max = v_j; } row_data[i] = max; __syncthreads(); //get the sum *value = _max_reduce(row_data); } // _trace_mat_mat expects to be called with 1 blocks, each of dimension // CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x], // i.e. value[0 through 0]. 
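// [Editor's note, illustrative only.] value[] holds per-block partial sums,
// not the final trace: the wrappers earlier in this file launch
// _trace_mat_mat with 2 blocks and _trace_mat_mat_trans with 4 blocks (see
// cudaD_trace_mat_mat / cudaD_trace_mat_mat_trans), so the caller is expected
// to copy value[0..num_blocks-1] back and add them up on the host.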
template<typename Real, int num_blocks> __global__ static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return; int num_elements = dA.rows * dA.cols, num_threads = CU1DBLOCK * num_blocks; int block_size = (num_elements + num_threads - 1) / num_threads; int loop_start = i * block_size, loop_end = (i + 1) * block_size; if (loop_end > num_elements) loop_end = num_elements; Real sum = 0.0; for (int j = loop_start; j < loop_end; j++) { // for (int j = i; j < num_elements; j += num_threads) { int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is // col-index in A; in B, it's reversed. int index_A = col + row * dA.stride, index_B = row + col * B_stride; sum += A[index_A] * B[index_B]; } __shared__ Real row_data[CU1DBLOCK]; row_data[threadIdx.x] = sum; __syncthreads(); Real ans = _sum_reduce(row_data); if (threadIdx.x == 0) value[blockIdx.x] = ans; } // _trace_mat_mat_trans expects to be called with 4 blocks, each of dimension // CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x], // i.e. value[0 through 3]. template<typename Real, int num_blocks> __global__ static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return; int num_elements = dA.rows * dA.cols, num_threads = CU1DBLOCK * num_blocks; // int block_size = (num_elements + num_threads - 1) / num_threads; // int loop_start = i * block_size, loop_end = (i + 1) * block_size; // if (loop_end > num_elements) // loop_end = num_elements; Real sum = 0.0; // for (int j = loop_start; j < loop_end; j++) { for (int j = i; j < num_elements; j += num_threads) { int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is // col-index in A; in B, it's reversed. int index_A = col + row * dA.stride, index_B = col + row * B_stride; sum += A[index_A] * B[index_B]; } __shared__ Real row_data[CU1DBLOCK]; row_data[threadIdx.x] = sum; __syncthreads(); Real ans = _sum_reduce(row_data); if (threadIdx.x == 0) value[blockIdx.x] = ans; } // Adds diag(M N) to v, where M and N are matrices. We supply row_stride and // col_stride arguments for M and N, and swapping them allows us to transpose // those matrices. Note: we imagine row-major indexing here, just like Kaldi // and CBLAS (but unlike CUBLAS). // This kernel expects the blockDim to be (CU1DBLOCK, 1) and the // gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element. // threads_per_element should be a power of 2. template<typename Real> __global__ static void _add_diag_mat_mat( Real alpha, Real* v, int v_dim, const Real* M, int M_cols, int M_row_stride, int M_col_stride, const Real *N, int N_row_stride, int N_col_stride, int threads_per_element, Real beta) { // we actually assume blockDim.x == CU1DBLOCK here. // Each diagonal element of v is processed by "threads_per_element" threads. __shared__ Real temp_data[CU1DBLOCK]; int i = blockIdx.x * blockDim.x + threadIdx.x; int v_idx = i / threads_per_element, // v_idx is the index into v that we are supposed to sub_idx = i % threads_per_element; // add to; 0 <= sub_idx < threads_per_element tells // us which block of elements we sum up. 
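// [Editor's note, illustrative only.] Worked example of the thread layout used
// here: with threads_per_element == 4 and v_dim == 3, threads 0..3 cooperate
// on v[0], threads 4..7 on v[1], threads 8..11 on v[2]; within each group the
// sub_idx-th thread sums the terms M(i,j)*N(j,i) for j = sub_idx, sub_idx+4, ...
// and the per-group partials are combined by the small tree reduction below.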
if (v_idx >= v_dim) return; Real sum = 0.0; for (int j = sub_idx; j < M_cols; j += threads_per_element) { int M_index = v_idx * M_row_stride + j * M_col_stride, N_index = j * N_row_stride + v_idx * N_col_stride; sum += M[M_index] * N[N_index]; } temp_data[threadIdx.x] = sum; // start_idx = threadIdx.x - sub_idx; // start of the position in temp_data // that we want to sum up. // The following is a tree-based reduction of the elements of temp_data from // start_idx to start_idx + threads_per_element - 1; our own index is "sub_idx". __syncthreads(); int num_total_threads = threads_per_element; while (num_total_threads > 1) { int half_point = ((1 + num_total_threads) >> 1); if (sub_idx < half_point) { Real temp = 0.0; if (sub_idx + half_point < num_total_threads) { temp = temp_data[threadIdx.x + half_point]; } temp_data[threadIdx.x] += temp; } __syncthreads(); num_total_threads = half_point; } if (sub_idx == 0) { v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x]; } } template<typename Real> __global__ static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) v[i] = alpha * x[i] * y[i] + beta * v[i]; } template<typename Real> __global__ static void _copy_col_from_mat_df(double* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (double) mat[index]; } template<typename Real> __global__ static void _copy_col_from_mat_fd(float* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (float) mat[index]; } template<typename Real> __global__ static void _vec_apply_exp(Real* v, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v[i] = exp(v[i]); } } template<typename Real> __global__ static void _vec_apply_log(Real* v, Real* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { if (v[i] < 0) { *flag = 1; return; } v[i] = log(v[i]); } } template<typename Real> __global__ static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z, MatrixDim d, Real* z2, MatrixDim d2, Real* t) { int i = threadIdx.x; __shared__ Real tot_objf[CU1DBLOCK]; __shared__ Real tot_weight[CU1DBLOCK]; Real tmp_weight_sum = 0; Real tmp_tot_objf = 0; int size = s / CU1DBLOCK; //the least size in a loop (later part) int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i*size; loop_end = threshold + (i+1)*size; } for(int j = loop_start; j< loop_end; j++) { int m = (x + j)->row; //* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) ); int label = (x + j)->column; //*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int)); Real weight = (x + j)->weight; //*(Real*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) ) + 2 * sizeof(int)); tmp_weight_sum += weight; Real this_prob = *(z + m * d.stride + label); tmp_tot_objf += weight * log(this_prob); *(z2 + m * d2.stride + label ) += weight / this_prob;// there might be problems here.... 
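// [Editor's note, illustrative only.] The loop bounds above split s elements
// over CU1DBLOCK threads as evenly as possible: the first `threshold` threads
// get size+1 elements each, the rest get size. Worked example with s = 10 and
// CU1DBLOCK = 256: size = 0, threshold = 10, so threads 0..9 handle one
// element each and threads 10..255 get an empty range. The same partitioning
// scheme reappears in _vec_sum below.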
} tot_objf[i] = tmp_tot_objf; tot_weight[i] = tmp_weight_sum; __syncthreads(); *t = _sum_reduce(tot_objf); __syncthreads(); *(t+1) = _sum_reduce(tot_weight); return; } template<typename Real> __global__ static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha, MatrixElement<Real>* x, int num_elements) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_elements) return; data[x[i].row * dim.stride + x[i].column] += alpha * x[i].weight; } template<typename Real> __global__ static void _cuda_matrix_add_indexed_values(MatrixDim dim, Real alpha, const Int32Pair* indices, const Real* x, int s, Real* data) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= s) return; int data_i = indices[i].first * dim.stride + indices[i].second; data[data_i] += alpha * x[i]; } template<typename Real> __global__ static void _matrix_lookup(const Real *data, MatrixDim dim, const Int32Pair *indices, int indices_size, Real *output) { int ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= indices_size) return; int data_ind = indices[ind].first * dim.stride + indices[ind].second; output[ind] = data[data_ind]; } template<typename Real> __global__ static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; //col int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; //row int32_cuda index_mat1 = i + j*mat1_dim.stride; int32_cuda index_mat2 = i + j*mat2_stride; int32_cuda index_mask = i + j*mask_stride; if (i < mat1_dim.cols && j < mat1_dim.rows) mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 1.0 : 0.0); } template<typename Real> __global__ static void _vec_sum(Real *v, Real *sum, int dim, int inc) { int i = threadIdx.x; __shared__ Real row_data[CU1DBLOCK]; if (i >= CU1DBLOCK) return; Real tmp_sum = 0; int size = dim / CU1DBLOCK; //the least size in a loop (later part) int threshold = dim - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i * size; loop_end = threshold + (i+1) * size; } for(int j = loop_start; j< loop_end; j++) { tmp_sum += v[j * inc]; } row_data[threadIdx.x] = tmp_sum; __syncthreads(); *sum = _sum_reduce(row_data); } template<typename Real> __global__ static void _pvec_sum(Real* v, Real* g, int dim, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int start = size * i; if (start >= dim) return; int end = start + size; if (end > dim) end = dim; __shared__ Real row_data[CU1DBLOCK]; Real sum = 0; for (int j = start; j < end; j++) sum += v[j]; row_data[threadIdx.x] = sum; __syncthreads(); g[blockIdx.x] = _sum_reduce(row_data); } template<typename Real> __global__ static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] < floor_val) { v[i] = floor_val; count[i] = 1; } else { count[i] = 0; } } } template<typename Real> __global__ static void _vec_apply_ceiling(Real *v, Real ceiling_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] > ceiling_val) { v[i] = ceiling_val; count[i] = 1; } else { count[i] = 0; } } } // Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index. // this is for no reason, really, I just happened to prefer this // at the time. 
[dan] template<typename Real> __global__ static void _apply_pow(Real* mat, Real power, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { if (power == 1.0) return; if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { if (!(mat[index] >= 0.0)) return; mat[index] = sqrt(mat[index]); } else { mat[index] = pow(mat[index], power); } } } template<typename Real> __global__ static void _apply_pow_abs(Real* mat, Real power, bool include_sign, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { if (include_sign == true && mat[index] < 0) { if (power == 1.0) mat[index] = -std::abs(mat[index]); if (power == 2.0) { mat[index] = -mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = -sqrt(std::abs(mat[index])); } else { mat[index] = -pow(std::abs(mat[index]), power); } } else { if (power == 1.0) mat[index] = std::abs(mat[index]); if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = sqrt(std::abs(mat[index])); } else if (power < 0.0 && mat[index] == 0.0) { mat[index] = 0.0; } else { mat[index] = pow(std::abs(mat[index]), power); } } } } // Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index. // this is for no reason, really, I just happened to prefer this // at the time. [dan] template<typename Real> __global__ static void _apply_heaviside(Real* mat, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { mat[index] = (mat[index] > 0.0 ? 1.0 : 0.0); } } template<typename Real> __global__ static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (mat[index] < floor_val) mat[index] = floor_val; } } template<typename Real> __global__ static void _copy_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { // Note: in this kernel, the x dimension corresponds to rows and the y to columns, // as it will be going forward. int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int index = reorder[j], dst_index = i * dst_dim.stride + j; if (index >= 0) { int src_index = i * src_stride + reorder[j]; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0.0; } } } template<typename Real> __global__ static void _add_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { // Note: in this kernel, the x dimension corresponds to rows and the y to columns, // as it will be going forward. 
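  // Behavior (it mirrors _copy_cols above, but accumulates): for each
  // destination column j, src(i, reorder[j]) is added into dst(i, j); where
  // reorder[j] < 0 the destination is left unchanged rather than being set to
  // zero.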
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i < dst_dim.rows && j < dst_dim.cols) {
    int index = reorder[j], dst_index = i * dst_dim.stride + j;
    if (index >= 0) {
      int src_index = i * src_stride + reorder[j];
      Real val = src[src_index];
      dst[dst_index] += val;
    }
  }
}

template<typename Real>
__global__
static void _copy_rows(Real* dst, const Real *src,
                       const MatrixIndexT_cuda* reorder,
                       MatrixDim dst_dim, int src_stride) {
  // Note: in this kernel, the x dimension corresponds to rows and the y to
  // columns, as it will be going forward.
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i < dst_dim.rows && j < dst_dim.cols) {
    int index = reorder[i], dst_index = i * dst_dim.stride + j;
    if (index >= 0) {
      int src_index = reorder[i] * src_stride + j;
      Real val = src[src_index];
      dst[dst_index] = val;
    } else {
      dst[dst_index] = 0;
    }
  }
}

template<typename Real>
__global__
static void _copy_rows(Real* dst, const Real *const *src, MatrixDim dst_dim) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i < dst_dim.rows && j < dst_dim.cols) {
    int dst_index = i * dst_dim.stride + j;
    if (src[i] != NULL) {
      dst[dst_index] = src[i][j];
    } else {
      dst[dst_index] = 0;
    }
  }
}

template<typename Real>
__global__
static void _copy_to_rows(Real* const* dst, const Real *src, MatrixDim src_dim) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i < src_dim.rows && j < src_dim.cols) {
    if (dst[i] != NULL) {
      dst[i][j] = src[i * src_dim.stride + j];
    }
  }
}

template<typename Real>
__global__
static void _add_rows(Real alpha, Real* dst, const Real *src,
                      const MatrixIndexT_cuda* reorder,
                      MatrixDim dst_dim, int src_stride) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i < dst_dim.rows && j < dst_dim.cols) {
    int dst_index = i * dst_dim.stride + j;
    if (reorder[i] >= 0) {
      int src_index = reorder[i] * src_stride + j;
      dst[dst_index] += alpha * src[src_index];
    }
  }
}

template<typename Real>
__global__
static void _add_rows(Real alpha, Real* dst, const Real *const *src,
                      MatrixDim dst_dim) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i < dst_dim.rows && j < dst_dim.cols) {
    int dst_index = i * dst_dim.stride + j;
    if (src[i] != NULL) {
      dst[dst_index] += alpha * src[i][j];
    }
  }
}

template<typename Real>
__global__
static void _add_to_rows(Real alpha, Real* const* dst, const Real *src,
                         MatrixDim src_dim) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i < src_dim.rows && j < src_dim.cols) {
    if (dst[i] != NULL) {
      dst[i][j] += alpha * src[i * src_dim.stride + j];
    }
  }
}

template<typename Real>
__global__
static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int index = i + j * d.stride;
  if (i < d.cols && j < d.rows) {
    if (mat[index] > ceiling_val)
      mat[index] = ceiling_val;
  }
}

template<typename Real>
__global__
static void _invert_elements(Real* data, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int index = i + j * d.stride;
  if (i < d.cols && j < d.rows)
    data[index] = 1.0 / data[index];
}

// matrix-wise, do data = alpha * A * B^T + beta * data,
// where B is a block matrix.
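// In _add_mat_blockmat_trans (and _add_mat_blockmat further below), the x
// index picks a row of A / "data" and the y index picks a block of B; each
// thread then loops over the columns of its block, so every output element
// becomes alpha times the corresponding element of A * B^T plus beta times
// its previous value.
//
// Illustrative host-side launch of the float wrapper declared later in this
// file (all variable names here are placeholders, not taken from any
// particular caller):
//
//   dim3 Bl(32, 1);                            // Bl.x threads cover rows of A
//   dim3 Gr((A_num_rows + Bl.x - 1) / Bl.x,    // enough x-blocks for all rows
//           B_num_blocks);                     // one y-slot per block of B
//   cudaF_add_mat_blockmat(Gr, Bl, data, d, A_data, A_num_rows, A_num_cols,
//                          A_row_stride, A_col_stride, B_cu_data, B_num_blocks,
//                          alpha, beta, 1 /* B_trans */);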
template<typename Real> __global__ static void _add_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } template<typename Real> __global__ static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[j]; int B_row_start = block_data.row_offset, B_col_start = block_data.col_offset, B_num_rows = block_data.matrix_dim.rows, B_num_cols = block_data.matrix_dim.cols, B_row_stride = block_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(block_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < B_num_cols; k++) { const Real *this_B_col = B_data + k; const Real *this_A_row = A_data + i * A_row_stride + B_row_start * A_col_stride; // this_A_row points to the element A[i][B_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < B_num_rows; l++) // l indexes rows of B. sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + B_col_start); data[index] = alpha * sum + beta * data[index]; } } // For a block matrix B, does B = alpha * C * D + beta * B. // the (x,y,z) indices are the block index, then the row // and column indices within the block. Note: transposition of C and D // is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride), // so it's invisible to this code. The num-cols and num-rows of C and D // are only provided to the extent that they are not already determined // by other quantities. template<typename Real> __global__ static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks, const Real *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const Real *D_data, int D_row_stride, int D_col_stride, Real alpha, Real beta) { int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B. 
int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block if (b >= num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[b]; if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols) return; // we're outside the dimensions of the b'th block. // B_elem is the element of B we're writing to. Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data) + i * block_data.matrix_dim.stride + j; Real B_val = *B_elem; // B_row and B_col are the (row, col) index into the full matrix B. int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j; const Real *C_row_data = C_data + C_row_stride * B_row, *D_col_data = D_data + D_col_stride * B_col; Real sum = 0.0; for (int k = 0; k < C_num_cols; k++) { sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride]; } *B_elem = alpha * sum + beta * B_val; } template<typename Real> __global__ static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } // Since this is a newer kernel, x is the row-index and y is the // column-index. 
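// _sum_column_ranges, just below, computes for every output element
//   data(row, col) = sum over k in [indices[col].first, indices[col].second)
//                    of src_data(row, k),
// i.e. each output column is the sum of a contiguous range of source columns.
// As a made-up example (not from any particular caller), indices[0] = {0, 3}
// would make output column 0 of each row equal to
// src(row, 0) + src(row, 1) + src(row, 2).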
template<typename Real> __global__ static void _sum_column_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indices) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col, src_start_index = row * src_dim.stride + indices[col].first, src_end_index = row * src_dim.stride + indices[col].second; Real sum = 0.0; for (int index = src_start_index; index < src_end_index; index++) sum += src_data[index]; data[dst_index] = sum; } template<typename Real> __global__ static void _add_row_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indexes) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col; int src_index_start = indexes[row].first, src_index_end = indexes[row].second; for (int row_index = src_index_start; row_index < src_index_end; row_index++) data[dst_index] += src_data[row_index * src_dim.stride + col]; } template<typename Real> __global__ static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; // compute the function y[index] = log(1 + exp(x[index])) if(i < d.cols && j < d.rows) { Real val = x[src_index], result; if (val >= 10.0) result = val; // function approaches y=x as x gets large else result = log1p(exp(val)); y[dst_index] = result; } } template<typename Real> __global__ static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; Real tmp = 0; int src_begin_index = i * group_size + j * src_stride; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { tmp += pow(std::abs(x[src_index]), power); } tmp = pow(tmp, Real(1.0 / power)); if (!isnan(tmp)) { y[dst_index] = tmp; } else { Real max_value = x[src_begin_index], min_value = max_value; for (int src_index = src_begin_index + 1; src_index < src_end_index; src_index ++) { if (x[src_index] > max_value) max_value = x[src_index]; if (x[src_index] < min_value) min_value = x[src_index]; } tmp = 0.0; Real max_abs_value = (max_value > -min_value ? 
max_value : -min_value); // let max_value be the // largest abs(value) if (max_abs_value == 0) { y[dst_index] = 0.0; } else { for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { Real x_scaled = x[src_index] / max_abs_value; tmp += pow(std::abs(x_scaled), Real(power)); } y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value; } } } } template<typename Real> __global__ static void _group_max(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; int src_begin_index = i * group_size + j * src_stride; Real max_value = -1e20; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { if (!isnan(x[src_index]) && x[src_index] > max_value) max_value = x[src_index]; } y[dst_index] = max_value; } } /* * cu:: */ template<typename Real> __global__ static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; if(i < d.cols && j < d.rows) { Real res = 1.0 / (1.0 + exp(-x[src_index])); y[dst_index] = res; } } template<typename Real> __global__ static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = y[y_index]*(1.0-y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j * src_stride; if(i < d.cols && j < d.rows) { Real exp_2x = exp(2.0*x[src_index]); Real res; if(isinf(exp_2x)) { res = 1.0; } else { res = (exp_2x - 1.0) / (exp_2x + 1.0); } y[dst_index] = res; } } template<typename Real> __global__ static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = (1.0 - y[y_index]*y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; //copy input to aux aux[threadIdx.x] = x[threadIdx.x+j*d.stride]; for(int i=1; i<steps; ++i) { if(threadIdx.x+i*THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride]) aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride]; } //get the maximum value int nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. 
    if (threadIdx.x < halfPoint) {
      // Get the shared value stored by another thread
      if (threadIdx.x + halfPoint < nTotalThreads
          && aux[threadIdx.x] < aux[threadIdx.x + halfPoint])
        aux[threadIdx.x] = aux[threadIdx.x + halfPoint];
    }
    __syncthreads();
    nTotalThreads = ((1 + nTotalThreads) >> 1);  // divide by two.
  }
  Real max = aux[0];
  __syncthreads();

  // subtract max, apply exp, sum up...
  y[threadIdx.x + j * d.stride] = exp(x[threadIdx.x + j * d.stride] - max);
  aux[threadIdx.x] = y[threadIdx.x + j * d.stride];
  for (int i = 1; i < steps; i++) {
    if (threadIdx.x + i * THREADS < d.cols) {
      y[threadIdx.x + i * THREADS + j * d.stride] =
          exp(x[threadIdx.x + i * THREADS + j * d.stride] - max);
      aux[threadIdx.x] += y[threadIdx.x + i * THREADS + j * d.stride];
    }
  }
  nTotalThreads = THREADS;
  __syncthreads();
  while (nTotalThreads > 1) {
    int halfPoint = ((1 + nTotalThreads) >> 1);  // divide by two
    // only the first half of the threads will be active.
    if (threadIdx.x < halfPoint) {
      // Get the shared value stored by another thread
      if (threadIdx.x + halfPoint < nTotalThreads)
        aux[threadIdx.x] += aux[threadIdx.x + halfPoint];
    }
    __syncthreads();
    nTotalThreads = ((1 + nTotalThreads) >> 1);  // divide by two.
  }
  Real sum = aux[0];
  __syncthreads();

  // normalize by sum...
  for (int i = 0; i < steps; i++) {
    if (threadIdx.x + i * THREADS < d.cols) {
      y[threadIdx.x + i * THREADS + j * d.stride] =
          y[threadIdx.x + i * THREADS + j * d.stride] / sum;
    }
  }
}

template<typename Real>
__global__
static void _log_softmax_reduce(Real *y, const Real *x, MatrixDim d,
                                int src_stride) {
  int j = blockIdx.x;
  int THREADS = blockDim.x;
  if (j >= d.rows) return;

  __shared__ Real aux[CU1DBLOCK];
  int steps = (d.cols - 1) / THREADS + 1;

  // Maximum step 1: loads input data to <aux>. If <d.cols> is larger than
  // <blockDim.x>, then we do a first pass filtering and only
  // keep a <blockDim.x> size array.
  aux[threadIdx.x] = x[threadIdx.x + j * d.stride];
  for (int i = 1; i < steps; ++i) {
    if (threadIdx.x + i * THREADS < d.cols
        && aux[threadIdx.x] < x[threadIdx.x + i * THREADS + j * d.stride])
      aux[threadIdx.x] = x[threadIdx.x + i * THREADS + j * d.stride];
  }

  // Maximum step 2: the standard max reduce.
  int nTotalThreads = THREADS;
  __syncthreads();
  while (nTotalThreads > 1) {
    int halfPoint = ((1 + nTotalThreads) >> 1);
    if (threadIdx.x < halfPoint) {
      if (threadIdx.x + halfPoint < nTotalThreads
          && aux[threadIdx.x] < aux[threadIdx.x + halfPoint])
        aux[threadIdx.x] = aux[threadIdx.x + halfPoint];
    }
    __syncthreads();
    nTotalThreads = ((1 + nTotalThreads) >> 1);
  }
  Real max = aux[0];
  __syncthreads();

  // Log sum step 1: subtracts max, and takes exponentials.
  y[threadIdx.x + j * d.stride] = x[threadIdx.x + j * d.stride] - max;
  aux[threadIdx.x] = exp(y[threadIdx.x + j * d.stride]);
  for (int i = 1; i < steps; ++i) {
    if (threadIdx.x + i * THREADS < d.cols) {
      y[threadIdx.x + i * THREADS + j * d.stride] =
          x[threadIdx.x + i * THREADS + j * d.stride] - max;
      aux[threadIdx.x] += exp(y[threadIdx.x + i * THREADS + j * d.stride]);
    }
  }

  // Log sum step 2: computes summation and then takes logarithm.
  nTotalThreads = THREADS;
  __syncthreads();
  while (nTotalThreads > 1) {
    int halfPoint = ((1 + nTotalThreads) >> 1);
    if (threadIdx.x < halfPoint) {
      if (threadIdx.x + halfPoint < nTotalThreads)
        aux[threadIdx.x] += aux[threadIdx.x + halfPoint];
    }
    __syncthreads();
    nTotalThreads = ((1 + nTotalThreads) >> 1);
  }
  Real log_sum = log(aux[0]);
  __syncthreads();

  // Computes log softmax.
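  // At this point y holds x minus the row maximum and aux[0] held
  // sum_k exp(x(j, k) - max), so subtracting log_sum below gives the usual
  // numerically stable log-softmax:
  //   y(j, k) = x(j, k) - max - log(sum_k exp(x(j, k) - max)).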
for (int i = 0; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols) { y[threadIdx.x + i * THREADS + j * d.stride] -= log_sum; } } } template<typename Real> __global__ static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = i % d_in.cols; int32_cuda src_row = j + off[i / d_in.cols]; if(src_row < 0) src_row = 0; if(src_row >= d_in.rows) src_row = d_in.rows-1; y[index] = x[src_col + src_row*d_in.stride]; } } template<typename Real> __global__ static void _take_mean(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index1 = i + j * d_in.stride; int32_cuda index2 = j + i * d_in.stride; if (i <= j && j < d_in.rows) { int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = 0.5 * (x[index1] + x[index2]); } } template<typename Real> __global__ static void _take_lower(const Real* x, Real* y, MatrixDim d_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j > i || i >= d_in.rows) return; int index = i * d_in.stride + j; Real val = x[index]; int index_sp = (i * (i+1) / 2) + j; y[index_sp] = val; } template<typename Real> __global__ static void _take_upper(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j < i || j >= d_in.rows) return; int32_cuda index = i * d_in.stride + j; int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = x[index]; } template<typename Real> __global__ static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1) * (i+2) / 2) - 1; if (i < dim) { y[i] = x[index]; } } template<typename Real> __global__ static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dim.rows && j < dim.cols) { int dst_index = i * dim.stride + j, src_index; if (j <= i) { // no transpose src_index = (i * (i+1) / 2) + j; } else { // transpose. 
src_index = (j * (j+1) / 2) + i; } y[dst_index] = x[src_index]; } } template<typename Real> __global__ static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = copy_from[i]; if(src_col >= 0 && src_col < d_in.cols) { y[index] = x[src_col + j*d_in.stride]; } else { y[index] = 1.0/0.0; } } } template<typename Real> __global__ static void _one(Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { x[i] = 1.0; } } template<typename Real> __global__ static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_row = copy_from[j]; y[index] = x[i + src_row*d_in.stride]; } } template<typename Real> __global__ static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, grad_index = i + j*stride_grad; if (i < d.cols && j < d.rows) { if(wei[index]==0.0) return; //skip L1 if zero weight! Real l1_signed = l1; if(wei[index] < 0.0) //flip sign l1_signed = -l1; Real before = wei[index]; Real after = wei[index] -lr*grad[grad_index] -l1_signed;//simulate update if((after > 0.0) ^ (before > 0.0)) { //sign changed? wei[index] = 0.0; grad[grad_index] = 0.0; } else { wei[index] -= l1_signed; } } } template<typename Real> __global__ static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(blockIdx.x > 0) return; if(blockDim.y != 1) return; __shared__ Real value[CU1DBLOCK]; __shared__ int32_cuda index[CU1DBLOCK]; //copy to shared memory value[threadIdx.x] = mat[i+j*d.stride]; index[threadIdx.x] = threadIdx.x; __syncthreads(); //get the id of the max value int32_cuda out_max = _max_id_reduce(value, index); __syncthreads(); //see if it's bigger value if(threadIdx.x == 0) { if(vec_val[j] <= mat[out_max+j*d.stride]) { vec_val[j] = mat[out_max+j*d.stride]; vec_id[j] = voff+out_max; } } } template<typename Real> __global__ static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(i>0) return; if(j<d.rows) { int32_cuda index = vec_tgt[j] + j*d.stride; Real value = mat_net_out[index]; if(value < 1e-20) value = 1e-20; vec_log_post[j] = log(value); mat_net_out[index] -= 1.0; } } /*********************************************************************** * ANSI-C wrappers of CUDA kernels */ /* * "int32" */ void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } /* * "float" */ /* * CuMatrix */ void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { _copy_upp_low<<<Gr,Bl>>>(A,dimA); } void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { _copy_low_upp<<<Gr,Bl>>>(A,dimA); } void 
cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *vec, const float *mat2, int mat2_row_stride, int mat2_col_stride, float beta) { _add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaF_transpose_matrix(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _transpose_matrix<<<Gr,Bl>>>(mat, d); } void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_exp<<<Gr,Bl>>>(mat,d); } void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) { _apply_pow<<<Gr,Bl>>>(mat, power, d); } void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power, bool include_sign, MatrixDim d) { _apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d); } void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_heaviside<<<Gr,Bl>>>(mat, d); } void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaF_add_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows_direct(dim3 Gr, dim3 Bl, float* dst, const float* const* src, MatrixDim dst_dim) { _copy_rows<<<Gr,Bl>>>(dst, src, dst_dim); } void cudaF_copy_to_rows_direct(dim3 Gr, dim3 Bl, float* const* dst, const float* src, MatrixDim src_dim) { _copy_to_rows<<<Gr,Bl>>>(dst, src, src_dim); } void cudaF_add_rows(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, dst_dim, src_stride); } void cudaF_add_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* const* src, MatrixDim dst_dim) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, dst_dim); } void cudaF_add_to_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* const* dst, const float* src, MatrixDim src_dim) { _add_to_rows<<<Gr,Bl>>>(alpha, dst, src, src_dim); } void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) { _apply_floor<<<Gr,Bl>>>(mat, floor_val, d); } void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) { _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d); } void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) { _set_diag<<<Gr,Bl>>>(mat,value,d); } void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _set_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _add_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaF_set_const(dim3 Gr, dim3 Bl, 
float* mat, float value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _set_zero_above_diag<<<Gr,Bl>>>(mat, d); } void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _scale_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _scale<<<Gr,Bl>>>(mat,value,d); } void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_log<<<Gr,Bl>>>(mat,d); } void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _div_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _max<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { _mul_cols_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { _mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaF_calc_pnorm_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size, float power) { _calc_pnorm_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size, power); } void cudaF_calc_group_max_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size) { _calc_group_max_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size); } void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) { _div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d); } void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } else { _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } } void cudaF_add_mat_blocks(dim3 Gr, dim3 Bl, float alpha, const float* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_blocks_trans<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { _add_mat_blocks<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaF_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B, const float *C, float *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { _add_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d, stride_a, stride_b, stride_c); } void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T, MatrixDim tdim, float *S, MatrixDim sdim) { _sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim); } void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) { _add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d); } void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) { _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d); } void 
cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *mat2, int mat2_row_stride, int mat2_col_stride, const float *vec, float beta) { _add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) { _add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { _apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask); } /* * CuVector */ void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig, float changed) { _replace_value<<<Gr,Bl>>>(v, dim, orig, changed); } void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a, float param_1, float param_2, float param_3, int* flag, int dim) { _set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim); } void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) { _copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) { _copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) { _vec_mul_elements<<<Gr,Bl>>>(v, a, dim); } void cudaF_vec_min(const float* v, float* value, int dim) { _vec_min<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaF_vec_max(const float* v, float* value, int dim) { _vec_max<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaF_trace_mat_mat_trans(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { _trace_mat_mat_trans<float,4> <<<4,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaF_trace_mat_mat(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { _trace_mat_mat<float,2> <<<2,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M, int M_cols, int M_row_stride, int M_col_stride, const float *N, int N_row_stride, int N_col_stride, int threads_per_element, float beta) { _add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) { _add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim); } void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) { _vec_sum<<<Gr,Bl>>>(v, value, dim, inc); } void cudaF_pvec_sum(int Gr, int Bl, float* v, float* pvec_sum, int dim, int size) { _pvec_sum<<<Gr,Bl>>>(v, pvec_sum, dim, size); } void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, float alpha, MatrixElement<float>* x, int num_elements) { _cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, num_elements); } void cudaF_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, float alpha, const Int32Pair* indices, const float* x, int s, float* data) { _cuda_matrix_add_indexed_values<<<Gr, Bl>>>(dim, alpha, indices, x, s, data); } void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s, const float* z, MatrixDim d, float* z2, MatrixDim d2, float* t) { _cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t); } void cudaD_comp_obj_deriv(dim3 Gr,dim3 Bl, 
MatrixElement<double>* x, int s, const double* z, MatrixDim d, double* z2, MatrixDim d2, double* t) { _cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t); } void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst, const float *src, int dim) { _vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim); } void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) { _vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim); } void cudaF_vec_apply_ceiling(int Gr, int Bl, float* v, float ceiling_val, float *count, int dim) { _vec_apply_ceiling<<<Gr,Bl>>>(v, ceiling_val,count,dim); } void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) { _vec_apply_exp<<<Gr,Bl>>>(v,dim); } void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) { _vec_apply_log<<<Gr,Bl>>>(v,flag,dim); } void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) { _invert_elements<<<Gr,Bl>>>(data, d); } void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d, const float *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, float alpha, float beta, int B_trans) { if (B_trans) { _add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { _add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const float *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const float *D_data, int D_row_stride, int D_col_stride, float alpha, float beta) { _block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaF_soft_hinge (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size, float power) { _group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power); } void cudaF_group_max(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { _group_max<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _tanh<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { _diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { _softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_log_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { _log_softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { 
_splice<<<Gr,Bl>>>(y,x,off,d_out,d_in); } void cudaF_one(int Gr, int Bl, float* x, int dim) { _one<<<Gr,Bl>>>(x,dim); } void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_mean<<<Gr,Bl>>>(x,y,d_in); } void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_lower<<<Gr,Bl>>>(x,y,d_in); } void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_upper<<<Gr,Bl>>>(x,y,d_in); } void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim dim) { _copy_from_sp<<<Gr,Bl>>>(x, y, dim); } void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d, int stride_grad) { _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad); } void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d); } void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) { _diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d); } void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) { _copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in); } void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const float* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indices) { _sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices); } void cudaF_add_row_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indexes) { _add_row_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indexes); } void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim, const Int32Pair *indices, int indices_size, float *output) { _matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output); } void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1, const float *mat2, float *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { _equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* * "double" */ /* * CuMatrix */ void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { _copy_upp_low<<<Gr,Bl>>>(A,dimA); } void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { _copy_low_upp<<<Gr,Bl>>>(A,dimA); } void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *vec, const double *mat2, int mat2_row_stride, int mat2_col_stride, double beta) { _add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { 
_copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaD_transpose_matrix(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _transpose_matrix<<<Gr,Bl>>>(mat, d); } void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_exp<<<Gr,Bl>>>(mat,d); } void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) { _apply_pow<<<Gr,Bl>>>(mat, power, d); } void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power, bool include_sign, MatrixDim d) { _apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d); } void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_heaviside<<<Gr,Bl>>>(mat, d); } void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_add_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows_direct(dim3 Gr, dim3 Bl, double* dst, const double* const* src, MatrixDim dst_dim) { _copy_rows<<<Gr,Bl>>>(dst, src, dst_dim); } void cudaD_copy_to_rows_direct(dim3 Gr, dim3 Bl, double* const* dst, const double* src, MatrixDim src_dim) { _copy_to_rows<<<Gr,Bl>>>(dst, src, src_dim); } void cudaD_add_rows(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, dst_dim, src_stride); } void cudaD_add_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* const* src, MatrixDim dst_dim) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, dst_dim); } void cudaD_add_to_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* const* dst, const double* src, MatrixDim src_dim) { _add_to_rows<<<Gr,Bl>>>(alpha, dst, src, src_dim); } void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) { _apply_floor<<<Gr,Bl>>>(mat, floor_val, d); } void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) { _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d); } void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) { _set_diag<<<Gr,Bl>>>(mat,value,d); } void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _set_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _add_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _set_zero_above_diag<<<Gr,Bl>>>(mat, d); } void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } void 
cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _scale_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _scale<<<Gr,Bl>>>(mat,value,d); } void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_log<<<Gr,Bl>>>(mat,d); } void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _div_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _max<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { _mul_cols_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { _mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaD_calc_pnorm_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size, double power) { _calc_pnorm_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size, power); } void cudaD_calc_group_max_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size) { _calc_group_max_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size); } void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) { _div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d); } void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } else { _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } } void cudaD_add_mat_blocks(dim3 Gr, dim3 Bl, double alpha, const double* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_blocks_trans<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { _add_mat_blocks<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaD_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A, const double *B, const double *C, double *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { _add_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d,stride_a,stride_b,stride_c); } void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta, const double* T, MatrixDim tdim, double *S, MatrixDim sdim) { _sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim); } void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) { _add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d); } void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) { _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d); } void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *mat2, int mat2_row_stride, int mat2_col_stride, const double *vec, double beta) { _add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, 
vec, beta); } void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) { _add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { _apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask); } /* * CuVector */ void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig, double changed) { _replace_value<<<Gr,Bl>>>(v, dim, orig, changed); } void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a, double param_1, double param_2, double param_3, int* flag, int dim) { _set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim); } void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) { _copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) { _copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) { _vec_mul_elements<<<Gr,Bl>>>(v, a, dim); } void cudaD_vec_min(const double* v, double* value, int dim) { _vec_min<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaD_vec_max(const double* v, double* value, int dim) { _vec_max<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaD_trace_mat_mat_trans(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { _trace_mat_mat_trans<double,4> <<<4,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaD_trace_mat_mat(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { _trace_mat_mat<double,2> <<<2,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M, int M_cols, int M_row_stride, int M_col_stride, const double *N, int N_row_stride, int N_col_stride, int threads_per_element, double beta) { _add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) { _add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim); } void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const double* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) { _vec_sum<<<Gr,Bl>>>(v,value,dim,inc); } void cudaD_pvec_sum(int Gr, int Bl, double* v, double* pvec_sum, int dim, int size) { _pvec_sum<<<Gr,Bl>>>(v,pvec_sum,dim,size); } void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, double alpha, MatrixElement<double>* x, int num_elements) { _cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, num_elements); } void cudaD_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, double alpha, const Int32Pair* indices, const double* x, int s, double* data) { _cuda_matrix_add_indexed_values<<<Gr, Bl>>>(dim, alpha, indices, x, s, data); } void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst, const double *src, int dim) { 
_vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim); } void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) { _vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim); } void cudaD_vec_apply_ceiling(int Gr, int Bl, double* v, double ceiling_val, float *count, int dim) { _vec_apply_ceiling<<<Gr,Bl>>>(v,ceiling_val,count,dim); } void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) { _vec_apply_exp<<<Gr,Bl>>>(v,dim); } void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) { _vec_apply_log<<<Gr,Bl>>>(v,flag,dim); } void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) { _invert_elements<<<Gr,Bl>>>(data, d); } void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d, const double *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, double alpha, double beta, int B_trans) { if (B_trans) { _add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { _add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const double *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const double *D_data, int D_row_stride, int D_col_stride, double alpha, double beta) { _block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaD_soft_hinge (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size, double power) { _group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power); } void cudaD_group_max(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { _group_max<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _tanh<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { _diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { _softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_log_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { _log_softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { _splice<<<Gr,Bl>>>(y,x,off,d_out,d_in); } void cudaD_one(int Gr, int Bl, double* x, int dim) { _one<<<Gr,Bl>>>(x,dim); } void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_mean<<<Gr,Bl>>>(x,y,d_in); } void 
cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_lower<<<Gr,Bl>>>(x,y,d_in); } void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_upper<<<Gr,Bl>>>(x,y,d_in); } void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_out) { _copy_from_sp<<<Gr,Bl>>>(x,y,d_out); } void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d,int stride_grad) { _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad); } void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d); } void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) { _diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d); } void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) { _copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in); } void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indices) { _sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices); } void cudaD_add_row_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indexes) { _add_row_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indexes); } void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim, const Int32Pair *indices, int indices_size, double *output) { _matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output); } void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1, const double *mat2, double *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { _equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* Some conversion kernels for which it's more convenient to not name them F or D. 
*/ void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_smat_ff(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_df(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd_trans(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cudaF_trace_mat_smat(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, float* trace_vec_out) { _trace_mat_smat<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaF_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, float* 
trace_vec_out) { _trace_mat_smat_trans<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* trace_vec_out) { _trace_mat_smat<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* trace_vec_out) { _trace_mat_smat_trans<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); }
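/*
 * Minimal usage sketch for the 1-D vector wrappers above (illustrative only,
 * not part of the library): the caller chooses the grid/block sizes and the
 * wrapper simply forwards them to the kernel. CU1DBLOCK is the block size the
 * library uses; the value 256 and the helper names below are assumptions made
 * for this sketch.
 */
#ifndef CU1DBLOCK
#define CU1DBLOCK 256  // assumed default; the library defines the real value
#endif

static inline int example_n_blocks(int dim, int block_size) {
  // round up so every vector element is covered by exactly one thread
  return (dim + block_size - 1) / block_size;
}

static void example_scale_vector(double* d_v, const double* d_a, int dim) {
  int Bl = CU1DBLOCK;                             // threads per block
  int Gr = example_n_blocks(dim, Bl);             // blocks covering 'dim' elements
  cudaD_vec_mul_elements(Gr, Bl, d_v, d_a, dim);  // elementwise v[i] *= a[i] (wrapper defined above)
}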
2789787be0d0420e2b9ed67b3722787418acfebb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "caffe/layers/softmax_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype maxval = -FLT_MAX; for (int c = 0; c < channels; ++c) { maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); } out[index] = maxval; } } template <typename Dtype> __global__ void kernel_channel_subtract(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_max, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] -= channel_max[n * spatial_dim + s]; } } template <typename Dtype> __global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); } } template <typename Dtype> __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template <typename Dtype> __global__ void kernel_channel_div(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } template <typename Dtype> __global__ void kernel_channel_dot(const int num, const int channels, const int spatial_dim, const Dtype* data_1, const Dtype* data_2, Dtype* channel_dot) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype dot = 0; for (int c = 0; c < channels; ++c) { dot += (data_1[(n * channels + c) * spatial_dim + s] * data_2[(n * channels + c) * spatial_dim + s]); } channel_dot[index] = dot; } } template <typename Dtype> void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { LOG(INFO) << "USE GPU!!"; const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); int count = bottom[0]->count(); int channels = top[0]->shape(softmax_axis_); caffe_copy(count, bottom_data, top_data); // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. 
// compute max // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data, scale_data); // subtract // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_, scale_data, top_data); // exponentiate // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_data, top_data); // sum after exp // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data, scale_data); // divide // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_, scale_data, top_data); } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); } // namespace caffe
2789787be0d0420e2b9ed67b3722787418acfebb.cu
#include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "caffe/layers/softmax_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype maxval = -FLT_MAX; for (int c = 0; c < channels; ++c) { maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); } out[index] = maxval; } } template <typename Dtype> __global__ void kernel_channel_subtract(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_max, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] -= channel_max[n * spatial_dim + s]; } } template <typename Dtype> __global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); } } template <typename Dtype> __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template <typename Dtype> __global__ void kernel_channel_div(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } template <typename Dtype> __global__ void kernel_channel_dot(const int num, const int channels, const int spatial_dim, const Dtype* data_1, const Dtype* data_2, Dtype* channel_dot) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype dot = 0; for (int c = 0; c < channels; ++c) { dot += (data_1[(n * channels + c) * spatial_dim + s] * data_2[(n * channels + c) * spatial_dim + s]); } channel_dot[index] = dot; } } template <typename Dtype> void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { LOG(INFO) << "USE GPU!!"; const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); int count = bottom[0]->count(); int channels = top[0]->shape(softmax_axis_); caffe_copy(count, bottom_data, top_data); // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. 
// compute max // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_), CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data, scale_data); // subtract // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_, scale_data, top_data); // exponentiate // NOLINT_NEXT_LINE(whitespace/operators) kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_data, top_data); // sum after exp // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_), CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data, scale_data); // divide // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_, scale_data, top_data); } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); } // namespace caffe
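/*
 * Both Forward_gpu implementations above follow the numerically stable
 * softmax recipe spelled out in the comment: subtract the per-channel max,
 * exponentiate, then divide by the per-channel sum. A small CPU reference of
 * that recipe over a single channel vector (illustrative sketch, not part of
 * Caffe):
 */
#include <algorithm>
#include <cmath>
#include <vector>

static std::vector<float> softmax_reference(const std::vector<float>& x) {
  float m = *std::max_element(x.begin(), x.end());  // kernel_channel_max
  std::vector<float> y(x.size());
  float sum = 0.f;
  for (size_t i = 0; i < x.size(); ++i) {
    y[i] = std::exp(x[i] - m);                      // kernel_channel_subtract + kernel_exp
    sum += y[i];                                    // kernel_channel_sum
  }
  for (float& v : y) v /= sum;                      // kernel_channel_div
  return y;
}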
e5bd8f580d022105def2ff22d11f4fe6d0429974.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>

#include "caffe/layers/loss/max_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

static __global__ void forward_kernel(int count, int channels, int spatial_dim,
    const float *pred_data, const float *label_data, float *loss_data) {
  CUDA_KERNEL_LOOP(i, count) {
    int n = i / spatial_dim;
    int s = i % spatial_dim;
    int label = label_data[i];
    float sum = 0;
    for (int c = 0; c < channels; c++) {
      if (c != label)
        sum += abs(pred_data[(n*channels+c)*spatial_dim+s]);
      else
        sum += abs(1 - pred_data[(n*channels+c)*spatial_dim+s]);
    }
    loss_data[i] = sum;
  }
}

static __global__ void backward_kernel(int count, int channels, int spatial_dim,
    const float *pred_data, const float *label_data, float *pred_diff) {
  CUDA_KERNEL_LOOP(i, count) {
    int n = i / spatial_dim;
    int s = i % spatial_dim;
    int label = label_data[i];
    for (int c = 0; c < channels; c++) {
      if (c != label)
        pred_diff[(n*channels+c)*spatial_dim+s] = pred_data[(n*channels+c)*spatial_dim+s] > 0 ? 1 : -1;
      else
        pred_diff[(n*channels+c)*spatial_dim+s] = pred_data[(n*channels+c)*spatial_dim+s] - 1 > 0 ? 1 : -1;
    }
  }
}

void MaxLossLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) {
  int num = bottom[0]->num();
  int channels = bottom[0]->channels();
  int height = bottom[0]->height();
  int width = bottom[0]->width();

  hipLaunchKernelGGL(( forward_kernel),
      dim3(CAFFE_GET_BLOCKS(num*height*width)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      num*height*width, channels, height*width,
      bottom[0]->gpu_data(), bottom[1]->gpu_data(), loss_.mutable_gpu_data());

  float loss;
  caffe_gpu_asum(num*height*width, loss_.gpu_data(), &loss);
  top[0]->mutable_cpu_data()[0] = loss / float(num*height*width);
}

void MaxLossLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom) {
  int num = bottom[0]->num();
  int channels = bottom[0]->channels();
  int height = bottom[0]->height();
  int width = bottom[0]->width();

  if (Caffe::second_pass() == false) {
    hipLaunchKernelGGL(( backward_kernel),
        dim3(CAFFE_GET_BLOCKS(num*height*width)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num*height*width, channels, height*width,
        bottom[0]->gpu_data(), bottom[1]->gpu_data(), bottom[0]->mutable_gpu_diff());

    const float loss_weight = top[0]->cpu_diff()[0] / float(num*height*width);
    caffe_gpu_scal(bottom[0]->count(), loss_weight, bottom[0]->mutable_gpu_diff());
  }
}

void MaxLossLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) {
}

}  // namespace caffe
e5bd8f580d022105def2ff22d11f4fe6d0429974.cu
#include <vector> #include "caffe/layers/loss/max_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { static __global__ void forward_kernel(int count, int channels,int spatial_dim, const float *pred_data, const float *label_data, float *loss_data) { CUDA_KERNEL_LOOP(i, count) { int n = i / spatial_dim; int s = i % spatial_dim; int label = label_data[i]; float sum = 0; for (int c=0;c<channels;c++) { if (c != label) sum += abs(pred_data[(n*channels+c)*spatial_dim+s]); else sum += abs(1 - pred_data[(n*channels+c)*spatial_dim+s]); } loss_data[i] = sum; } } static __global__ void backward_kernel(int count, int channels,int spatial_dim, const float *pred_data, const float *label_data, float *pred_diff) { CUDA_KERNEL_LOOP(i, count) { int n = i / spatial_dim; int s = i % spatial_dim; int label = label_data[i]; for (int c=0;c<channels;c++) { if (c != label) pred_diff[(n*channels+c)*spatial_dim+s] = pred_data[(n*channels+c)*spatial_dim+s]>0? 1:-1; else pred_diff[(n*channels+c)*spatial_dim+s] = pred_data[(n*channels+c)*spatial_dim+s]-1>0? 1:-1; } } } void MaxLossLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); forward_kernel<<<CAFFE_GET_BLOCKS(num*height*width), CAFFE_CUDA_NUM_THREADS>>> (num*height*width,channels,height*width,bottom[0]->gpu_data(),bottom[1]->gpu_data(),loss_.mutable_gpu_data()); float loss; caffe_gpu_asum(num*height*width, loss_.gpu_data(), &loss); top[0]->mutable_cpu_data()[0] = loss / float(num*height*width); } void MaxLossLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); if (Caffe::second_pass() == false) { backward_kernel<<<CAFFE_GET_BLOCKS(num*height*width), CAFFE_CUDA_NUM_THREADS>>> (num*height*width,channels,height*width,bottom[0]->gpu_data(),bottom[1]->gpu_data(),bottom[0]->mutable_gpu_diff()); const float loss_weight = top[0]->cpu_diff()[0] / float(num*height*width); caffe_gpu_scal(bottom[0]->count(), loss_weight, bottom[0]->mutable_gpu_diff()); } } void MaxLossLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { } } // namespace caffe
e7c6f9eedbd332149209d6b66d7d0e12a8e77663.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: joaander #include "ComputeThermoGPU.cuh" #include "VectorMath.h" #include <assert.h> //! Shared memory used in reducing the sums extern __shared__ Scalar3 compute_thermo_sdata[]; //! Shared memory used in final reduction extern __shared__ Scalar4 compute_thermo_final_sdata[]; //! Shared memory used in reducing the sums of the pressure tensor extern __shared__ Scalar compute_pressure_tensor_sdata[]; //! Shared memory used in reducing the sum of the rotational kinetic energy extern __shared__ Scalar compute_ke_rot_sdata[]; /*! \file ComputeThermoGPU.cu \brief Defines GPU kernel code for computing thermodynamic properties on the GPU. Used by ComputeThermoGPU. */ //! Perform partial sums of the thermo properties on the GPU /*! \param d_scratch Scratch space to hold partial sums. One element is written per block \param d_net_force Net force / pe array from ParticleData \param d_net_virial Net virial array from ParticleData \param virial_pitch pitch of 2D virial array \param d_velocity Particle velocity and mass array from ParticleData \param d_group_members List of group members for which to sum properties \param work_size Number of particles in the group this GPU processes \param offset Offset of this GPU in list of group members \param block_offset Offset of this GPU in the array of partial sums All partial sums are packaged up in a Scalar4 to keep pointer management down. - 2*Kinetic energy is summed in .x - Potential energy is summed in .y - W is summed in .z One thread is executed per group member. That thread reads in the values for its member into shared memory and then the block performs a reduction in parallel to produce a partial sum output for the block. These partial sums are written to d_scratch[blockIdx.x]. sizeof(Scalar3)*block_size of dynamic shared memory are needed for this kernel to run. 
*/ __global__ void gpu_compute_thermo_partial_sums(Scalar4 *d_scratch, Scalar4 *d_net_force, Scalar *d_net_virial, const unsigned int virial_pitch, Scalar4 *d_velocity, unsigned int *d_group_members, unsigned int work_size, unsigned int offset, unsigned int block_offset) { // determine which particle this thread works on int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar3 my_element; // element of scratch space read in if (group_idx < work_size) { unsigned int idx = d_group_members[group_idx+offset]; // update positions to the next timestep and update velocities to the next half step Scalar4 net_force = d_net_force[idx]; Scalar net_isotropic_virial; // (1/3)*trace of virial tensor net_isotropic_virial = Scalar(1.0/3.0)* (d_net_virial[0*virial_pitch+idx] // xx +d_net_virial[3*virial_pitch+idx] // yy +d_net_virial[5*virial_pitch+idx]); // zz Scalar4 vel = d_velocity[idx]; Scalar mass = vel.w; // compute our contribution to the sum my_element.x = mass * (vel.x*vel.x + vel.y*vel.y + vel.z*vel.z); my_element.y = net_force.w; my_element.z = net_isotropic_virial; } else { // non-participating thread: contribute 0 to the sum my_element = make_scalar3(0, 0, 0); } compute_thermo_sdata[threadIdx.x] = my_element; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) { compute_thermo_sdata[threadIdx.x].x += compute_thermo_sdata[threadIdx.x + offs].x; compute_thermo_sdata[threadIdx.x].y += compute_thermo_sdata[threadIdx.x + offs].y; compute_thermo_sdata[threadIdx.x].z += compute_thermo_sdata[threadIdx.x + offs].z; } offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) { Scalar3 res = compute_thermo_sdata[0]; d_scratch[block_offset + blockIdx.x] = make_scalar4(res.x, res.y, res.z, 0); } } //! Perform partial sums of the pressure tensor on the GPU /*! \param d_scratch Scratch space to hold partial sums. One element is written per block \param d_net_force Net force / pe array from ParticleData \param d_net_virial Net virial array from ParticleData \param virial_pitch pitch of 2D virial array \param d_velocity Particle velocity and mass array from ParticleData \param d_group_members List of group members for which to sum properties \param work_size Number of particles in the group \param offset Offset of this GPU in the list of group members \param block_offset Offset of this GPU in the array of partial sums \param num_blocks Total number of partial sums by all GPUs One thread is executed per group member. That thread reads in the six values (components of the pressure tensor) for its member into shared memory and then the block performs a reduction in parallel to produce a partial sum output for the block. These partial sums are written to d_scratch[i*gridDim.x + blockIdx.x], where i=0..5 is the index of the component. For this kernel to run, 6*sizeof(Scalar)*block_size of dynamic shared memory are needed. 
*/ __global__ void gpu_compute_pressure_tensor_partial_sums(Scalar *d_scratch, Scalar4 *d_net_force, Scalar *d_net_virial, const unsigned int virial_pitch, Scalar4 *d_velocity, unsigned int *d_group_members, unsigned int work_size, unsigned int offset, unsigned int block_offset, unsigned int num_blocks) { // determine which particle this thread works on int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar my_element[6]; // element of scratch space read in if (group_idx < work_size) { unsigned int idx = d_group_members[group_idx+offset]; // compute contribution to pressure tensor and store it in my_element Scalar4 vel = d_velocity[idx]; Scalar mass = vel.w; my_element[0] = mass*vel.x*vel.x + d_net_virial[0*virial_pitch+idx]; // xx my_element[1] = mass*vel.x*vel.y + d_net_virial[1*virial_pitch+idx]; // xy my_element[2] = mass*vel.x*vel.z + d_net_virial[2*virial_pitch+idx]; // xz my_element[3] = mass*vel.y*vel.y + d_net_virial[3*virial_pitch+idx]; // yy my_element[4] = mass*vel.y*vel.z + d_net_virial[4*virial_pitch+idx]; // yz my_element[5] = mass*vel.z*vel.z + d_net_virial[5*virial_pitch+idx]; // zz } else { // non-participating thread: contribute 0 to the sum my_element[0] = 0; my_element[1] = 0; my_element[2] = 0; my_element[3] = 0; my_element[4] = 0; my_element[5] = 0; } for (unsigned int i = 0; i < 6; i++) compute_pressure_tensor_sdata[i*blockDim.x+threadIdx.x] = my_element[i]; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) { for (unsigned int i = 0; i < 6; i++) compute_pressure_tensor_sdata[i*blockDim.x+threadIdx.x] += compute_pressure_tensor_sdata[i*blockDim.x + threadIdx.x + offs]; } offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) { for (unsigned int i = 0; i < 6; i++) d_scratch[num_blocks * i + blockIdx.x + block_offset] = compute_pressure_tensor_sdata[i*blockDim.x]; } } //! Perform partial sums of the rotational KE on the GPU /*! \param d_scratch Scratch space to hold partial sums. 
One element is written per block \param d_orientation Orientation quaternions from ParticleData \param d_angmom Conjugate quaternions from ParticleData \param d_inertia Moments of inertia from ParticleData \param d_group_members List of group members for which to sum properties \param work_size Number of particles in the group processed by this GPU \param offset Offset of this GPU in the list of group members \param block_offset Output offset of this GPU */ __global__ void gpu_compute_rotational_ke_partial_sums(Scalar *d_scratch, const Scalar4 *d_orientation, const Scalar4 *d_angmom, const Scalar3 *d_inertia, unsigned int *d_group_members, unsigned int work_size, unsigned int offset, unsigned int block_offset) { // determine which particle this thread works on int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar my_element; // element of scratch space read in if (group_idx < work_size) { unsigned int idx = d_group_members[group_idx+offset]; // update positions to the next timestep and update velocities to the next half step quat<Scalar> q(d_orientation[idx]); quat<Scalar> p(d_angmom[idx]); vec3<Scalar> I(d_inertia[idx]); quat<Scalar> s(Scalar(0.5)*conj(q)*p); Scalar ke_rot(0.0); if (I.x >= EPSILON) { ke_rot += s.v.x*s.v.x/I.x; } if (I.y >= EPSILON) { ke_rot += s.v.y*s.v.y/I.y; } if (I.z >= EPSILON) { ke_rot += s.v.z*s.v.z/I.z; } // compute our contribution to the sum my_element = ke_rot*Scalar(1.0/2.0); } else { // non-participating thread: contribute 0 to the sum my_element = Scalar(0.0); } compute_ke_rot_sdata[threadIdx.x] = my_element; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) compute_ke_rot_sdata[threadIdx.x] += compute_ke_rot_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) { d_scratch[blockIdx.x + block_offset] = compute_ke_rot_sdata[0]; } } //! Complete partial sums and compute final thermodynamic quantities (for pressure, only isotropic contribution) /*! \param d_properties Property array to write final values \param d_scratch Partial sums \param d_scratch_rot Partial sums of rotational kinetic energy \param ndof Number of degrees of freedom this group possesses \param box Box the particles are in \param D Dimensionality of the system \param group_size Number of particles in the group \param num_partial_sums Number of partial sums in \a d_scratch \param external_virial External contribution to virial (1/3 trace) \param external_energy External contribution to potential energy Only one block is executed. In that block, the partial sums are read in and reduced to final values. From the final sums, the thermodynamic properties are computed and written to d_properties. sizeof(Scalar4)*block_size bytes of shared memory are needed for this kernel to run. 
*/ __global__ void gpu_compute_thermo_final_sums(Scalar *d_properties, Scalar4 *d_scratch, Scalar *d_scratch_rot, unsigned int ndof, BoxDim box, unsigned int D, unsigned int group_size, unsigned int num_partial_sums, Scalar external_virial, Scalar external_energy ) { Scalar4 final_sum = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0),Scalar(0.0)); // sum up the values in the partial sum via a sliding window for (int start = 0; start < num_partial_sums; start += blockDim.x) { __syncthreads(); if (start + threadIdx.x < num_partial_sums) { Scalar4 scratch = d_scratch[start + threadIdx.x]; Scalar scratch_rot = d_scratch_rot[start + threadIdx.x]; compute_thermo_final_sdata[threadIdx.x] = make_scalar4(scratch.x, scratch.y, scratch.z, scratch_rot); } else compute_thermo_final_sdata[threadIdx.x] = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) { compute_thermo_final_sdata[threadIdx.x].x += compute_thermo_final_sdata[threadIdx.x + offs].x; compute_thermo_final_sdata[threadIdx.x].y += compute_thermo_final_sdata[threadIdx.x + offs].y; compute_thermo_final_sdata[threadIdx.x].z += compute_thermo_final_sdata[threadIdx.x + offs].z; compute_thermo_final_sdata[threadIdx.x].w += compute_thermo_final_sdata[threadIdx.x + offs].w; } offs >>= 1; __syncthreads(); } if (threadIdx.x == 0) { final_sum.x += compute_thermo_final_sdata[0].x; final_sum.y += compute_thermo_final_sdata[0].y; final_sum.z += compute_thermo_final_sdata[0].z; final_sum.w += compute_thermo_final_sdata[0].w; } } if (threadIdx.x == 0) { // compute final quantities Scalar ke_trans_total = final_sum.x * Scalar(0.5); Scalar pe_total = final_sum.y + external_energy; Scalar W = final_sum.z + external_virial; Scalar ke_rot_total = final_sum.w; // compute the pressure // volume/area & other 2D stuff needed Scalar volume; Scalar3 L = box.getL(); if (D == 2) { // "volume" is area in 2D volume = L.x * L.y; // W needs to be corrected since the 1/3 factor is built in W *= Scalar(3.0)/Scalar(2.0); } else { volume = L.x * L.y * L.z; } // pressure: P = (N * K_B * T + W)/V Scalar pressure = (Scalar(2.0) * ke_trans_total / Scalar(D) + W) / volume; // fill out the GPUArray d_properties[thermo_index::translational_kinetic_energy] = Scalar(ke_trans_total); d_properties[thermo_index::rotational_kinetic_energy] = Scalar(ke_rot_total); d_properties[thermo_index::potential_energy] = Scalar(pe_total); d_properties[thermo_index::pressure] = pressure; } } //! Complete partial sums and compute final pressure tensor /*! \param d_properties Property array to write final values \param d_scratch Partial sums \param box Box the particles are in \param group_size Number of particles in the group \param num_partial_sums Number of partial sums in \a d_scratch \param external_virial_xx External contribution to virial (xx component) \param external_virial_xy External contribution to virial (xy component) \param external_virial_xz External contribution to virial (xz component) \param external_virial_yy External contribution to virial (yy component) \param external_virial_yz External contribution to virial (yz component) \param external_virial_zz External contribution to virial (zz component) Only one block is executed. In that block, the partial sums are read in and reduced to final values. From the final sums, the thermodynamic properties are computed and written to d_properties. 
6*sizeof(Scalar)*block_size bytes of shared memory are needed for this kernel to run. */ __global__ void gpu_compute_pressure_tensor_final_sums(Scalar *d_properties, Scalar *d_scratch, BoxDim box, unsigned int group_size, unsigned int num_partial_sums, Scalar external_virial_xx, Scalar external_virial_xy, Scalar external_virial_xz, Scalar external_virial_yy, Scalar external_virial_yz, Scalar external_virial_zz, bool twod) { Scalar final_sum[6]; final_sum[0] = external_virial_xx; final_sum[1] = external_virial_xy; final_sum[2] = external_virial_xz; final_sum[3] = external_virial_yy; final_sum[4] = external_virial_yz; final_sum[5] = external_virial_zz; // sum up the values in the partial sum via a sliding window for (int start = 0; start < num_partial_sums; start += blockDim.x) { __syncthreads(); if (start + threadIdx.x < num_partial_sums) { for (unsigned int i = 0; i < 6; i++) compute_pressure_tensor_sdata[i * blockDim.x + threadIdx.x] = d_scratch[i*num_partial_sums + start + threadIdx.x]; } else for (unsigned int i = 0; i < 6; i++) compute_pressure_tensor_sdata[i * blockDim.x + threadIdx.x] = Scalar(0.0); __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) { for (unsigned int i = 0; i < 6; i++) compute_pressure_tensor_sdata[i*blockDim.x + threadIdx.x] += compute_pressure_tensor_sdata[i*blockDim.x + threadIdx.x + offs]; } offs >>= 1; __syncthreads(); } if (threadIdx.x == 0) { for (unsigned int i = 0; i < 6; i++) final_sum[i] += compute_pressure_tensor_sdata[i*blockDim.x]; } } if (threadIdx.x == 0) { // fill out the GPUArray // we have thus far calculated the sum of the kinetic part of the pressure tensor // and the virial part, the definition includes an inverse factor of the box volume Scalar V = box.getVolume(twod); d_properties[thermo_index::pressure_xx] = final_sum[0]/V; d_properties[thermo_index::pressure_xy] = final_sum[1]/V; d_properties[thermo_index::pressure_xz] = final_sum[2]/V; d_properties[thermo_index::pressure_yy] = final_sum[3]/V; d_properties[thermo_index::pressure_yz] = final_sum[4]/V; d_properties[thermo_index::pressure_zz] = final_sum[5]/V; } } //! Compute partial sums of thermodynamic properties of a group on the GPU, /*! \param d_properties Array to write computed properties \param d_vel particle velocities and masses on the GPU \param d_group_members List of group members \param group_size Number of group members \param box Box the particles are in \param args Additional arguments \param compute_pressure_tensor whether to compute the full pressure tensor \param compute_rotational_energy whether to compute the rotational kinetic energy \param gpu_partition Load balancing info for multi-GPU reduction This function drives gpu_compute_thermo_partial_sums and gpu_compute_thermo_final_sums, see them for details. 
*/ hipError_t gpu_compute_thermo_partial(Scalar *d_properties, Scalar4 *d_vel, unsigned int *d_group_members, unsigned int group_size, const BoxDim& box, const compute_thermo_args& args, bool compute_pressure_tensor, bool compute_rotational_energy, const GPUPartition& gpu_partition ) { assert(d_properties); assert(d_vel); assert(d_group_members); assert(args.d_net_force); assert(args.d_net_virial); assert(args.d_scratch); unsigned int block_offset = 0; // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; dim3 grid(nwork/args.block_size+1, 1, 1); dim3 threads(args.block_size, 1, 1); unsigned int shared_bytes = sizeof(Scalar3)*args.block_size; hipLaunchKernelGGL(( gpu_compute_thermo_partial_sums), dim3(grid),dim3(threads), shared_bytes, 0, args.d_scratch, args.d_net_force, args.d_net_virial, args.virial_pitch, d_vel, d_group_members, nwork, range.first, block_offset); if (compute_pressure_tensor) { assert(args.d_scratch_pressure_tensor); shared_bytes = 6 * sizeof(Scalar) * args.block_size; // run the kernel hipLaunchKernelGGL(( gpu_compute_pressure_tensor_partial_sums), dim3(grid), dim3(threads), shared_bytes, 0, args.d_scratch_pressure_tensor, args.d_net_force, args.d_net_virial, args.virial_pitch, d_vel, d_group_members, nwork, range.first, block_offset, args.n_blocks); } if (compute_rotational_energy) { assert(args.d_scratch_pressure_tensor); shared_bytes = sizeof(Scalar) * args.block_size; // run the kernel hipLaunchKernelGGL(( gpu_compute_rotational_ke_partial_sums), dim3(grid), dim3(threads), shared_bytes, 0, args.d_scratch_rot, args.d_orientation, args.d_angmom, args.d_inertia, d_group_members, nwork, range.first, block_offset); } block_offset += grid.x; } assert(block_offset <= args.n_blocks); return hipSuccess; } //! Compute thermodynamic properties of a group on the GPU /*! \param d_properties Array to write computed properties \param d_vel particle velocities and masses on the GPU \param d_group_members List of group members \param group_size Number of group members \param box Box the particles are in \param args Additional arguments \param compute_pressure_tensor whether to compute the full pressure tensor \param compute_rotational_energy whether to compute the rotational kinetic energy \param num_blocks Number of partial sums to reduce This function drives gpu_compute_thermo_partial_sums and gpu_compute_thermo_final_sums, see them for details. 
*/ hipError_t gpu_compute_thermo_final(Scalar *d_properties, Scalar4 *d_vel, unsigned int *d_group_members, unsigned int group_size, const BoxDim& box, const compute_thermo_args& args, bool compute_pressure_tensor, bool compute_rotational_energy ) { assert(d_properties); assert(d_vel); assert(d_group_members); assert(args.d_net_force); assert(args.d_net_virial); assert(args.d_scratch); // setup the grid to run the final kernel int final_block_size = 512; dim3 grid = dim3(1, 1, 1); dim3 threads = dim3(final_block_size, 1, 1); unsigned int shared_bytes = sizeof(Scalar4)*final_block_size; Scalar external_virial = Scalar(1.0/3.0)*(args.external_virial_xx + args.external_virial_yy + args.external_virial_zz); // run the kernel hipLaunchKernelGGL(( gpu_compute_thermo_final_sums), dim3(grid), dim3(threads), shared_bytes, 0, d_properties, args.d_scratch, args.d_scratch_rot, args.ndof, box, args.D, group_size, args.n_blocks, external_virial, args.external_energy); if (compute_pressure_tensor) { shared_bytes = 6 * sizeof(Scalar) * final_block_size; // run the kernel hipLaunchKernelGGL(( gpu_compute_pressure_tensor_final_sums), dim3(grid), dim3(threads), shared_bytes, 0, d_properties, args.d_scratch_pressure_tensor, box, group_size, args.n_blocks, args.external_virial_xx, args.external_virial_xy, args.external_virial_xz, args.external_virial_yy, args.external_virial_yz, args.external_virial_zz, args.D == 2); } return hipSuccess; }
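/*
 * Each partial-sum kernel above loads one value per thread into dynamic
 * shared memory and halves the active stride until thread 0 holds the block
 * total. A stripped-down sketch of that reduction pattern on its own
 * (assumes a power-of-two block size, as the kernels above do; launch with
 * blockDim.x*sizeof(float) bytes of dynamic shared memory; names are
 * illustrative):
 */
extern __shared__ float example_sdata[];

__global__ void example_block_sum(const float* in, float* block_out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    example_sdata[threadIdx.x] = (i < n) ? in[i] : 0.0f;   // pad inactive threads with 0
    __syncthreads();

    // tree reduction: halve the stride until one value remains
    for (int offs = blockDim.x >> 1; offs > 0; offs >>= 1) {
        if (threadIdx.x < offs)
            example_sdata[threadIdx.x] += example_sdata[threadIdx.x + offs];
        __syncthreads();
    }

    if (threadIdx.x == 0)
        block_out[blockIdx.x] = example_sdata[0];          // one partial sum per block
}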
e7c6f9eedbd332149209d6b66d7d0e12a8e77663.cu
// Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: joaander #include "ComputeThermoGPU.cuh" #include "VectorMath.h" #include <assert.h> //! Shared memory used in reducing the sums extern __shared__ Scalar3 compute_thermo_sdata[]; //! Shared memory used in final reduction extern __shared__ Scalar4 compute_thermo_final_sdata[]; //! Shared memory used in reducing the sums of the pressure tensor extern __shared__ Scalar compute_pressure_tensor_sdata[]; //! Shared memory used in reducing the sum of the rotational kinetic energy extern __shared__ Scalar compute_ke_rot_sdata[]; /*! \file ComputeThermoGPU.cu \brief Defines GPU kernel code for computing thermodynamic properties on the GPU. Used by ComputeThermoGPU. */ //! Perform partial sums of the thermo properties on the GPU /*! \param d_scratch Scratch space to hold partial sums. One element is written per block \param d_net_force Net force / pe array from ParticleData \param d_net_virial Net virial array from ParticleData \param virial_pitch pitch of 2D virial array \param d_velocity Particle velocity and mass array from ParticleData \param d_group_members List of group members for which to sum properties \param work_size Number of particles in the group this GPU processes \param offset Offset of this GPU in list of group members \param block_offset Offset of this GPU in the array of partial sums All partial sums are packaged up in a Scalar4 to keep pointer management down. - 2*Kinetic energy is summed in .x - Potential energy is summed in .y - W is summed in .z One thread is executed per group member. That thread reads in the values for its member into shared memory and then the block performs a reduction in parallel to produce a partial sum output for the block. These partial sums are written to d_scratch[blockIdx.x]. sizeof(Scalar3)*block_size of dynamic shared memory are needed for this kernel to run. 
*/ __global__ void gpu_compute_thermo_partial_sums(Scalar4 *d_scratch, Scalar4 *d_net_force, Scalar *d_net_virial, const unsigned int virial_pitch, Scalar4 *d_velocity, unsigned int *d_group_members, unsigned int work_size, unsigned int offset, unsigned int block_offset) { // determine which particle this thread works on int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar3 my_element; // element of scratch space read in if (group_idx < work_size) { unsigned int idx = d_group_members[group_idx+offset]; // update positions to the next timestep and update velocities to the next half step Scalar4 net_force = d_net_force[idx]; Scalar net_isotropic_virial; // (1/3)*trace of virial tensor net_isotropic_virial = Scalar(1.0/3.0)* (d_net_virial[0*virial_pitch+idx] // xx +d_net_virial[3*virial_pitch+idx] // yy +d_net_virial[5*virial_pitch+idx]); // zz Scalar4 vel = d_velocity[idx]; Scalar mass = vel.w; // compute our contribution to the sum my_element.x = mass * (vel.x*vel.x + vel.y*vel.y + vel.z*vel.z); my_element.y = net_force.w; my_element.z = net_isotropic_virial; } else { // non-participating thread: contribute 0 to the sum my_element = make_scalar3(0, 0, 0); } compute_thermo_sdata[threadIdx.x] = my_element; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) { compute_thermo_sdata[threadIdx.x].x += compute_thermo_sdata[threadIdx.x + offs].x; compute_thermo_sdata[threadIdx.x].y += compute_thermo_sdata[threadIdx.x + offs].y; compute_thermo_sdata[threadIdx.x].z += compute_thermo_sdata[threadIdx.x + offs].z; } offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) { Scalar3 res = compute_thermo_sdata[0]; d_scratch[block_offset + blockIdx.x] = make_scalar4(res.x, res.y, res.z, 0); } } //! Perform partial sums of the pressure tensor on the GPU /*! \param d_scratch Scratch space to hold partial sums. One element is written per block \param d_net_force Net force / pe array from ParticleData \param d_net_virial Net virial array from ParticleData \param virial_pitch pitch of 2D virial array \param d_velocity Particle velocity and mass array from ParticleData \param d_group_members List of group members for which to sum properties \param work_size Number of particles in the group \param offset Offset of this GPU in the list of group members \param block_offset Offset of this GPU in the array of partial sums \param num_blocks Total number of partial sums by all GPUs One thread is executed per group member. That thread reads in the six values (components of the pressure tensor) for its member into shared memory and then the block performs a reduction in parallel to produce a partial sum output for the block. These partial sums are written to d_scratch[i*gridDim.x + blockIdx.x], where i=0..5 is the index of the component. For this kernel to run, 6*sizeof(Scalar)*block_size of dynamic shared memory are needed. 
*/ __global__ void gpu_compute_pressure_tensor_partial_sums(Scalar *d_scratch, Scalar4 *d_net_force, Scalar *d_net_virial, const unsigned int virial_pitch, Scalar4 *d_velocity, unsigned int *d_group_members, unsigned int work_size, unsigned int offset, unsigned int block_offset, unsigned int num_blocks) { // determine which particle this thread works on int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar my_element[6]; // element of scratch space read in if (group_idx < work_size) { unsigned int idx = d_group_members[group_idx+offset]; // compute contribution to pressure tensor and store it in my_element Scalar4 vel = d_velocity[idx]; Scalar mass = vel.w; my_element[0] = mass*vel.x*vel.x + d_net_virial[0*virial_pitch+idx]; // xx my_element[1] = mass*vel.x*vel.y + d_net_virial[1*virial_pitch+idx]; // xy my_element[2] = mass*vel.x*vel.z + d_net_virial[2*virial_pitch+idx]; // xz my_element[3] = mass*vel.y*vel.y + d_net_virial[3*virial_pitch+idx]; // yy my_element[4] = mass*vel.y*vel.z + d_net_virial[4*virial_pitch+idx]; // yz my_element[5] = mass*vel.z*vel.z + d_net_virial[5*virial_pitch+idx]; // zz } else { // non-participating thread: contribute 0 to the sum my_element[0] = 0; my_element[1] = 0; my_element[2] = 0; my_element[3] = 0; my_element[4] = 0; my_element[5] = 0; } for (unsigned int i = 0; i < 6; i++) compute_pressure_tensor_sdata[i*blockDim.x+threadIdx.x] = my_element[i]; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) { for (unsigned int i = 0; i < 6; i++) compute_pressure_tensor_sdata[i*blockDim.x+threadIdx.x] += compute_pressure_tensor_sdata[i*blockDim.x + threadIdx.x + offs]; } offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) { for (unsigned int i = 0; i < 6; i++) d_scratch[num_blocks * i + blockIdx.x + block_offset] = compute_pressure_tensor_sdata[i*blockDim.x]; } } //! Perform partial sums of the rotational KE on the GPU /*! \param d_scratch Scratch space to hold partial sums. 
One element is written per block \param d_orientation Orientation quaternions from ParticleData \param d_angmom Conjugate quaternions from ParticleData \param d_inertia Moments of inertia from ParticleData \param d_group_members List of group members for which to sum properties \param work_size Number of particles in the group processed by this GPU \param offset Offset of this GPU in the list of group members \param block_offset Output offset of this GPU */ __global__ void gpu_compute_rotational_ke_partial_sums(Scalar *d_scratch, const Scalar4 *d_orientation, const Scalar4 *d_angmom, const Scalar3 *d_inertia, unsigned int *d_group_members, unsigned int work_size, unsigned int offset, unsigned int block_offset) { // determine which particle this thread works on int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar my_element; // element of scratch space read in if (group_idx < work_size) { unsigned int idx = d_group_members[group_idx+offset]; // update positions to the next timestep and update velocities to the next half step quat<Scalar> q(d_orientation[idx]); quat<Scalar> p(d_angmom[idx]); vec3<Scalar> I(d_inertia[idx]); quat<Scalar> s(Scalar(0.5)*conj(q)*p); Scalar ke_rot(0.0); if (I.x >= EPSILON) { ke_rot += s.v.x*s.v.x/I.x; } if (I.y >= EPSILON) { ke_rot += s.v.y*s.v.y/I.y; } if (I.z >= EPSILON) { ke_rot += s.v.z*s.v.z/I.z; } // compute our contribution to the sum my_element = ke_rot*Scalar(1.0/2.0); } else { // non-participating thread: contribute 0 to the sum my_element = Scalar(0.0); } compute_ke_rot_sdata[threadIdx.x] = my_element; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) compute_ke_rot_sdata[threadIdx.x] += compute_ke_rot_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) { d_scratch[blockIdx.x + block_offset] = compute_ke_rot_sdata[0]; } } //! Complete partial sums and compute final thermodynamic quantities (for pressure, only isotropic contribution) /*! \param d_properties Property array to write final values \param d_scratch Partial sums \param d_scratch_rot Partial sums of rotational kinetic energy \param ndof Number of degrees of freedom this group possesses \param box Box the particles are in \param D Dimensionality of the system \param group_size Number of particles in the group \param num_partial_sums Number of partial sums in \a d_scratch \param external_virial External contribution to virial (1/3 trace) \param external_energy External contribution to potential energy Only one block is executed. In that block, the partial sums are read in and reduced to final values. From the final sums, the thermodynamic properties are computed and written to d_properties. sizeof(Scalar4)*block_size bytes of shared memory are needed for this kernel to run. 
*/ __global__ void gpu_compute_thermo_final_sums(Scalar *d_properties, Scalar4 *d_scratch, Scalar *d_scratch_rot, unsigned int ndof, BoxDim box, unsigned int D, unsigned int group_size, unsigned int num_partial_sums, Scalar external_virial, Scalar external_energy ) { Scalar4 final_sum = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0),Scalar(0.0)); // sum up the values in the partial sum via a sliding window for (int start = 0; start < num_partial_sums; start += blockDim.x) { __syncthreads(); if (start + threadIdx.x < num_partial_sums) { Scalar4 scratch = d_scratch[start + threadIdx.x]; Scalar scratch_rot = d_scratch_rot[start + threadIdx.x]; compute_thermo_final_sdata[threadIdx.x] = make_scalar4(scratch.x, scratch.y, scratch.z, scratch_rot); } else compute_thermo_final_sdata[threadIdx.x] = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) { compute_thermo_final_sdata[threadIdx.x].x += compute_thermo_final_sdata[threadIdx.x + offs].x; compute_thermo_final_sdata[threadIdx.x].y += compute_thermo_final_sdata[threadIdx.x + offs].y; compute_thermo_final_sdata[threadIdx.x].z += compute_thermo_final_sdata[threadIdx.x + offs].z; compute_thermo_final_sdata[threadIdx.x].w += compute_thermo_final_sdata[threadIdx.x + offs].w; } offs >>= 1; __syncthreads(); } if (threadIdx.x == 0) { final_sum.x += compute_thermo_final_sdata[0].x; final_sum.y += compute_thermo_final_sdata[0].y; final_sum.z += compute_thermo_final_sdata[0].z; final_sum.w += compute_thermo_final_sdata[0].w; } } if (threadIdx.x == 0) { // compute final quantities Scalar ke_trans_total = final_sum.x * Scalar(0.5); Scalar pe_total = final_sum.y + external_energy; Scalar W = final_sum.z + external_virial; Scalar ke_rot_total = final_sum.w; // compute the pressure // volume/area & other 2D stuff needed Scalar volume; Scalar3 L = box.getL(); if (D == 2) { // "volume" is area in 2D volume = L.x * L.y; // W needs to be corrected since the 1/3 factor is built in W *= Scalar(3.0)/Scalar(2.0); } else { volume = L.x * L.y * L.z; } // pressure: P = (N * K_B * T + W)/V Scalar pressure = (Scalar(2.0) * ke_trans_total / Scalar(D) + W) / volume; // fill out the GPUArray d_properties[thermo_index::translational_kinetic_energy] = Scalar(ke_trans_total); d_properties[thermo_index::rotational_kinetic_energy] = Scalar(ke_rot_total); d_properties[thermo_index::potential_energy] = Scalar(pe_total); d_properties[thermo_index::pressure] = pressure; } } //! Complete partial sums and compute final pressure tensor /*! \param d_properties Property array to write final values \param d_scratch Partial sums \param box Box the particles are in \param group_size Number of particles in the group \param num_partial_sums Number of partial sums in \a d_scratch \param external_virial_xx External contribution to virial (xx component) \param external_virial_xy External contribution to virial (xy component) \param external_virial_xz External contribution to virial (xz component) \param external_virial_yy External contribution to virial (yy component) \param external_virial_yz External contribution to virial (yz component) \param external_virial_zz External contribution to virial (zz component) Only one block is executed. In that block, the partial sums are read in and reduced to final values. From the final sums, the thermodynamic properties are computed and written to d_properties. 
6*sizeof(Scalar)*block_size bytes of shared memory are needed for this kernel to run. */ __global__ void gpu_compute_pressure_tensor_final_sums(Scalar *d_properties, Scalar *d_scratch, BoxDim box, unsigned int group_size, unsigned int num_partial_sums, Scalar external_virial_xx, Scalar external_virial_xy, Scalar external_virial_xz, Scalar external_virial_yy, Scalar external_virial_yz, Scalar external_virial_zz, bool twod) { Scalar final_sum[6]; final_sum[0] = external_virial_xx; final_sum[1] = external_virial_xy; final_sum[2] = external_virial_xz; final_sum[3] = external_virial_yy; final_sum[4] = external_virial_yz; final_sum[5] = external_virial_zz; // sum up the values in the partial sum via a sliding window for (int start = 0; start < num_partial_sums; start += blockDim.x) { __syncthreads(); if (start + threadIdx.x < num_partial_sums) { for (unsigned int i = 0; i < 6; i++) compute_pressure_tensor_sdata[i * blockDim.x + threadIdx.x] = d_scratch[i*num_partial_sums + start + threadIdx.x]; } else for (unsigned int i = 0; i < 6; i++) compute_pressure_tensor_sdata[i * blockDim.x + threadIdx.x] = Scalar(0.0); __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) { for (unsigned int i = 0; i < 6; i++) compute_pressure_tensor_sdata[i*blockDim.x + threadIdx.x] += compute_pressure_tensor_sdata[i*blockDim.x + threadIdx.x + offs]; } offs >>= 1; __syncthreads(); } if (threadIdx.x == 0) { for (unsigned int i = 0; i < 6; i++) final_sum[i] += compute_pressure_tensor_sdata[i*blockDim.x]; } } if (threadIdx.x == 0) { // fill out the GPUArray // we have thus far calculated the sum of the kinetic part of the pressure tensor // and the virial part, the definition includes an inverse factor of the box volume Scalar V = box.getVolume(twod); d_properties[thermo_index::pressure_xx] = final_sum[0]/V; d_properties[thermo_index::pressure_xy] = final_sum[1]/V; d_properties[thermo_index::pressure_xz] = final_sum[2]/V; d_properties[thermo_index::pressure_yy] = final_sum[3]/V; d_properties[thermo_index::pressure_yz] = final_sum[4]/V; d_properties[thermo_index::pressure_zz] = final_sum[5]/V; } } //! Compute partial sums of thermodynamic properties of a group on the GPU, /*! \param d_properties Array to write computed properties \param d_vel particle velocities and masses on the GPU \param d_group_members List of group members \param group_size Number of group members \param box Box the particles are in \param args Additional arguments \param compute_pressure_tensor whether to compute the full pressure tensor \param compute_rotational_energy whether to compute the rotational kinetic energy \param gpu_partition Load balancing info for multi-GPU reduction This function drives gpu_compute_thermo_partial_sums and gpu_compute_thermo_final_sums, see them for details. 
*/ cudaError_t gpu_compute_thermo_partial(Scalar *d_properties, Scalar4 *d_vel, unsigned int *d_group_members, unsigned int group_size, const BoxDim& box, const compute_thermo_args& args, bool compute_pressure_tensor, bool compute_rotational_energy, const GPUPartition& gpu_partition ) { assert(d_properties); assert(d_vel); assert(d_group_members); assert(args.d_net_force); assert(args.d_net_virial); assert(args.d_scratch); unsigned int block_offset = 0; // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; dim3 grid(nwork/args.block_size+1, 1, 1); dim3 threads(args.block_size, 1, 1); unsigned int shared_bytes = sizeof(Scalar3)*args.block_size; gpu_compute_thermo_partial_sums<<<grid,threads, shared_bytes>>>(args.d_scratch, args.d_net_force, args.d_net_virial, args.virial_pitch, d_vel, d_group_members, nwork, range.first, block_offset); if (compute_pressure_tensor) { assert(args.d_scratch_pressure_tensor); shared_bytes = 6 * sizeof(Scalar) * args.block_size; // run the kernel gpu_compute_pressure_tensor_partial_sums<<<grid, threads, shared_bytes>>>(args.d_scratch_pressure_tensor, args.d_net_force, args.d_net_virial, args.virial_pitch, d_vel, d_group_members, nwork, range.first, block_offset, args.n_blocks); } if (compute_rotational_energy) { assert(args.d_scratch_pressure_tensor); shared_bytes = sizeof(Scalar) * args.block_size; // run the kernel gpu_compute_rotational_ke_partial_sums<<<grid, threads, shared_bytes>>>(args.d_scratch_rot, args.d_orientation, args.d_angmom, args.d_inertia, d_group_members, nwork, range.first, block_offset); } block_offset += grid.x; } assert(block_offset <= args.n_blocks); return cudaSuccess; } //! Compute thermodynamic properties of a group on the GPU /*! \param d_properties Array to write computed properties \param d_vel particle velocities and masses on the GPU \param d_group_members List of group members \param group_size Number of group members \param box Box the particles are in \param args Additional arguments \param compute_pressure_tensor whether to compute the full pressure tensor \param compute_rotational_energy whether to compute the rotational kinetic energy \param num_blocks Number of partial sums to reduce This function drives gpu_compute_thermo_partial_sums and gpu_compute_thermo_final_sums, see them for details. 
*/ cudaError_t gpu_compute_thermo_final(Scalar *d_properties, Scalar4 *d_vel, unsigned int *d_group_members, unsigned int group_size, const BoxDim& box, const compute_thermo_args& args, bool compute_pressure_tensor, bool compute_rotational_energy ) { assert(d_properties); assert(d_vel); assert(d_group_members); assert(args.d_net_force); assert(args.d_net_virial); assert(args.d_scratch); // setup the grid to run the final kernel int final_block_size = 512; dim3 grid = dim3(1, 1, 1); dim3 threads = dim3(final_block_size, 1, 1); unsigned int shared_bytes = sizeof(Scalar4)*final_block_size; Scalar external_virial = Scalar(1.0/3.0)*(args.external_virial_xx + args.external_virial_yy + args.external_virial_zz); // run the kernel gpu_compute_thermo_final_sums<<<grid, threads, shared_bytes>>>(d_properties, args.d_scratch, args.d_scratch_rot, args.ndof, box, args.D, group_size, args.n_blocks, external_virial, args.external_energy); if (compute_pressure_tensor) { shared_bytes = 6 * sizeof(Scalar) * final_block_size; // run the kernel gpu_compute_pressure_tensor_final_sums<<<grid, threads, shared_bytes>>>(d_properties, args.d_scratch_pressure_tensor, box, group_size, args.n_blocks, args.external_virial_xx, args.external_virial_xy, args.external_virial_xz, args.external_virial_yy, args.external_virial_yz, args.external_virial_zz, args.D == 2); } return cudaSuccess; }
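/* The drivers above implement a two-pass reduction: gpu_compute_thermo_partial launches
   per-block kernels that each deposit one partial sum per quantity into a scratch array,
   and gpu_compute_thermo_final launches a single block that folds all partial sums together
   with a sliding window over shared memory (see gpu_compute_pressure_tensor_final_sums).
   The standalone sketch below illustrates that pattern for a single scalar sum; the kernel
   and variable names (partial_sums_kernel, final_sum_kernel, d_scratch) are hypothetical
   and not part of the HOOMD-blue API. */
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

// First pass: every block reduces its slice of the input to one partial sum in d_scratch.
__global__ void partial_sums_kernel(const float *d_in, float *d_scratch, unsigned int n)
    {
    extern __shared__ float sdata[];
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[threadIdx.x] = (idx < n) ? d_in[idx] : 0.0f;
    __syncthreads();
    for (unsigned int offs = blockDim.x >> 1; offs > 0; offs >>= 1)
        {
        if (threadIdx.x < offs)
            sdata[threadIdx.x] += sdata[threadIdx.x + offs];
        __syncthreads();
        }
    if (threadIdx.x == 0)
        d_scratch[blockIdx.x] = sdata[0];
    }

// Second pass: a single block sweeps over the partial sums with a sliding window,
// the same scheme gpu_compute_pressure_tensor_final_sums uses for its six tensor components.
__global__ void final_sum_kernel(const float *d_scratch, float *d_out, unsigned int num_partial_sums)
    {
    extern __shared__ float sdata[];
    float final_sum = 0.0f;
    for (unsigned int start = 0; start < num_partial_sums; start += blockDim.x)
        {
        __syncthreads();
        if (start + threadIdx.x < num_partial_sums)
            sdata[threadIdx.x] = d_scratch[start + threadIdx.x];
        else
            sdata[threadIdx.x] = 0.0f;
        __syncthreads();
        for (unsigned int offs = blockDim.x >> 1; offs > 0; offs >>= 1)
            {
            if (threadIdx.x < offs)
                sdata[threadIdx.x] += sdata[threadIdx.x + offs];
            __syncthreads();
            }
        if (threadIdx.x == 0)
            final_sum += sdata[0];
        }
    if (threadIdx.x == 0)
        *d_out = final_sum;
    }

int main()
    {
    const unsigned int n = 1 << 20;
    const unsigned int block = 256;
    const unsigned int grid = (n + block - 1) / block;

    std::vector<float> h_in(n, 1.0f);   // all ones, so the expected sum is n
    float *d_in, *d_scratch, *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_scratch, grid * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in.data(), n * sizeof(float), cudaMemcpyHostToDevice);

    // both passes need blockDim.x floats of dynamic shared memory
    partial_sums_kernel<<<grid, block, block * sizeof(float)>>>(d_in, d_scratch, n);
    final_sum_kernel<<<1, block, block * sizeof(float)>>>(d_scratch, d_out, grid);

    float h_out = 0.0f;
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f (expected %u)\n", h_out, n);

    cudaFree(d_in); cudaFree(d_scratch); cudaFree(d_out);
    return 0;
    }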
c331c025c76051b6b6e683ed2eb60712625e9c34.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zgemm_fermi.cu, normal z -> s, Thu Oct 8 23:05:32 2020 @author Jakub Kurzak @author Stan Tomov @author Mark Gates [zcds]gemm_fermi.cu defines the CPU driver. [zcds]gemm_fermi_kernels.h defines the block sizes for each precision. gemm_stencil_defs.h defines types and functions for precision-independent code. These files are included multiple times, once for each transpose version. gemm_stencil.cuh defines the GPU kernel (device function). gemm_kernel.cuh defines the GPU kernel (global function). The batched version uses gemm_kernel_batched.cuh instead of gemm_kernel.cuh. */ #include "magma_internal.h" #include "commonblas_s.h" #define PRECISION_s #include "sgemm_fermi_kernels.h" /***************************************************************************//** Purpose ------- SGEMM performs one of the matrix-matrix operations C = alpha*op( A )*op( B ) + beta*C, where op( X ) is one of op( X ) = X or op( X ) = X**T or op( X ) = X**H, alpha and beta are scalars, and A, B and C are matrices, with op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. Parameters ---------- @param[in] transA magma_trans_t. On entry, transA specifies the form of op( A ) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op( A ) = A. - = MagmaTrans: op( A ) = A**T. - = MagmaConjTrans: op( A ) = A**H. @param[in] transB magma_trans_t. On entry, transB specifies the form of op( B ) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op( B ) = B. - = MagmaTrans: op( B ) = B**T. - = MagmaConjTrans: op( B ) = B**H. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix op( dA ) and of the matrix dC. M must be at least zero. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix op( dB ) and the number of columns of the matrix dC. N must be at least zero. @param[in] k INTEGER. On entry, K specifies the number of columns of the matrix op( dA ) and the number of rows of the matrix op( dB ). K must be at least zero. @param[in] alpha REAL On entry, ALPHA specifies the scalar alpha. @param[in] dA REAL array of DIMENSION ( LDA, ka ), where ka is k when transA = MagmaNoTrans, and is m otherwise. Before entry with transA = MagmaNoTrans, the leading m by k part of the array dA must contain the matrix dA, otherwise the leading k by m part of the array dA must contain the matrix dA. @param[in] ldda INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When transA = MagmaNoTrans then LDA must be at least max( 1, m ), otherwise LDA must be at least max( 1, k ). @param[in] dB REAL array of DIMENSION ( LDB, kb ), where kb is n when transB = MagmaNoTrans, and is k otherwise. Before entry with transB = MagmaNoTrans, the leading k by n part of the array dB must contain the matrix dB, otherwise the leading n by k part of the array dB must contain the matrix dB. @param[in] lddb INTEGER. On entry, LDB specifies the first dimension of dB as declared in the calling (sub) program. When transB = MagmaNoTrans then LDB must be at least max( 1, k ), otherwise LDB must be at least max( 1, n ). @param[in] beta REAL. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then dC need not be set on input. 
@param[in,out] dC REAL array of DIMENSION ( LDC, n ). Before entry, the leading m by n part of the array dC must contain the matrix dC, except when beta is zero, in which case dC need not be set on entry. On exit, the array dC is overwritten by the m by n matrix ( alpha*op( dA )*op( dB ) + beta*dC ). @param[in] lddc INTEGER. On entry, LDC specifies the first dimension of dC as declared in the calling (sub) program. LDC must be at least max( 1, m ). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_gemm *******************************************************************************/ extern "C" void magmablas_sgemm( magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magma_int_t k, float alpha, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_const_ptr dB, magma_int_t lddb, float beta, magmaFloat_ptr dC, magma_int_t lddc, magma_queue_t queue ) { magma_int_t info = 0; if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans ) info = -1; else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans ) info = -2; else if ( m < 0 ) info = -3; else if ( n < 0 ) info = -4; else if ( k < 0 ) info = -5; else if ( transA == MagmaNoTrans ? ldda < m : ldda < k ) info = -8; else if ( transB == MagmaNoTrans ? lddb < k : lddb < n ) info = -10; else if ( lddc < m ) info = -13; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( m <= 0 || n <= 0 || k <= 0 ) return; size_t offsetA = 0; size_t offsetB = 0; int TransA = 2, TransB = 2; if ( transA == MagmaTrans ) TransA = 1; else if ( transA == MagmaNoTrans ) TransA = 0; if ( transB == MagmaTrans ) TransB = 1; else if ( transB == MagmaNoTrans ) TransB = 0; magma_int_t Am = ( ! TransA ? m : k); magma_int_t An = (!TransA ? k : m); magma_int_t Bm = ( ! TransB ? k : n); magma_int_t Bn = (!TransB ? 
n : k); size_t sizeA = (size_t) ldda * (An - 1) + Am; size_t sizeB = (size_t) lddb * (Bn - 1) + Bm; size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512); if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE || sizeB >= CUBLAS_MAX_1DBUF_SIZE ) { magma_sgemm( transA, transB, m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc, queue ); return; } #ifdef TEXTURE_1D // Set textures parameters tex_ref_Amagma_s.normalized = false; tex_ref_Amagma_s.filterMode = hipFilterModePoint; tex_ref_Amagma_s.addressMode[0] = hipAddressModeClamp; tex_ref_Bmagma_s.normalized = false; tex_ref_Bmagma_s.filterMode = hipFilterModePoint; tex_ref_Bmagma_s.addressMode[0] = hipAddressModeClamp; // Bind A and B to texture references hipError_t err; err = hipBindTexture(&offsetA, tex_ref_Amagma_s, dA, sizeA*sizeof(float)); if ( err != hipSuccess ) { fprintf( stderr, "cannot bind A to texture: %s (%d)\n", hipGetErrorString(err), err ); return; } err = hipBindTexture(&offsetB, tex_ref_Bmagma_s, dB, sizeB*sizeof(float)); if ( err != hipSuccess ) { fprintf( stderr, "cannot bind B to texture: %s (%d)\n", hipGetErrorString(err), err ); hipUnbindTexture( tex_ref_Amagma_s ); return; } #endif // Set up grids dim3 dimBlock(DIM_X, DIM_Y); offsetA = offsetA/sizeof(dA[0]); offsetB = offsetB/sizeof(dB[0]); if ( TransA == 0 && TransB == 0 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_nn ), magma_ceildiv( n, BLK_N_nn ) ); hipLaunchKernelGGL(( sgemm_kernel_fermi_nn), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 1 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_nt ), magma_ceildiv( n, BLK_N_nt ) ); hipLaunchKernelGGL(( sgemm_kernel_fermi_nt), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 2 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_nc ), magma_ceildiv( n, BLK_N_nc ) ); hipLaunchKernelGGL(( sgemm_kernel_fermi_nc), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 0 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_tn ), magma_ceildiv( n, BLK_N_tn ) ); hipLaunchKernelGGL(( sgemm_kernel_fermi_tn), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 1 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_tt ), magma_ceildiv( n, BLK_N_tt ) ); hipLaunchKernelGGL(( sgemm_kernel_fermi_tt), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 2 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_tc ), magma_ceildiv( n, BLK_N_tc ) ); hipLaunchKernelGGL(( sgemm_kernel_fermi_tc), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 0 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_cn ), magma_ceildiv( n, BLK_N_cn ) ); hipLaunchKernelGGL(( sgemm_kernel_fermi_cn), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 1 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_ct ), magma_ceildiv( n, BLK_N_ct ) ); hipLaunchKernelGGL(( sgemm_kernel_fermi_ct), dim3(dimGrid), 
dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 2 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_cc ), magma_ceildiv( n, BLK_N_cc ) ); hipLaunchKernelGGL(( sgemm_kernel_fermi_cc), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } #ifdef TEXTURE_1D hipUnbindTexture( tex_ref_Amagma_s ); hipUnbindTexture( tex_ref_Bmagma_s ); #endif }
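/* A minimal host-side usage sketch for magmablas_sgemm as declared above, assuming the
   standard MAGMA 2.x helpers (magma_init, magma_queue_create, magma_smalloc,
   magma_ssetmatrix / magma_sgetmatrix). The matrix sizes are arbitrary and error checking
   of the MAGMA return codes is omitted for brevity. */
#include "magma_v2.h"

void example_sgemm()
{
    magma_init();
    magma_queue_t queue;
    magma_queue_create( 0, &queue );

    magma_int_t m = 512, n = 512, k = 512;
    magma_int_t ldda = m, lddb = k, lddc = m;

    // host matrices filled with ones, so every entry of C = A*B equals k
    float *hA, *hB, *hC;
    magma_smalloc_cpu( &hA, m*k );
    magma_smalloc_cpu( &hB, k*n );
    magma_smalloc_cpu( &hC, m*n );
    for (magma_int_t i = 0; i < m*k; ++i) hA[i] = 1.0f;
    for (magma_int_t i = 0; i < k*n; ++i) hB[i] = 1.0f;
    for (magma_int_t i = 0; i < m*n; ++i) hC[i] = 0.0f;

    magmaFloat_ptr dA, dB, dC;
    magma_smalloc( &dA, ldda*k );
    magma_smalloc( &dB, lddb*n );
    magma_smalloc( &dC, lddc*n );

    magma_ssetmatrix( m, k, hA, m, dA, ldda, queue );
    magma_ssetmatrix( k, n, hB, k, dB, lddb, queue );
    magma_ssetmatrix( m, n, hC, m, dC, lddc, queue );

    // C = 1.0 * A * B + 0.0 * C
    magmablas_sgemm( MagmaNoTrans, MagmaNoTrans, m, n, k,
                     1.0f, dA, ldda, dB, lddb, 0.0f, dC, lddc, queue );

    magma_sgetmatrix( m, n, dC, lddc, hC, m, queue );   // hC[0] should equal k

    magma_free( dA );  magma_free( dB );  magma_free( dC );
    magma_free_cpu( hA );  magma_free_cpu( hB );  magma_free_cpu( hC );
    magma_queue_destroy( queue );
    magma_finalize();
}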
c331c025c76051b6b6e683ed2eb60712625e9c34.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zgemm_fermi.cu, normal z -> s, Thu Oct 8 23:05:32 2020 @author Jakub Kurzak @author Stan Tomov @author Mark Gates [zcds]gemm_fermi.cu defines the CPU driver. [zcds]gemm_fermi_kernels.h defines the block sizes for each precision. gemm_stencil_defs.h defines types and functions for precision-independent code. These files are included multiple times, once for each transpose version. gemm_stencil.cuh defines the GPU kernel (device function). gemm_kernel.cuh defines the GPU kernel (global function). The batched version uses gemm_kernel_batched.cuh instead of gemm_kernel.cuh. */ #include "magma_internal.h" #include "commonblas_s.h" #define PRECISION_s #include "sgemm_fermi_kernels.h" /***************************************************************************//** Purpose ------- SGEMM performs one of the matrix-matrix operations C = alpha*op( A )*op( B ) + beta*C, where op( X ) is one of op( X ) = X or op( X ) = X**T or op( X ) = X**H, alpha and beta are scalars, and A, B and C are matrices, with op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. Parameters ---------- @param[in] transA magma_trans_t. On entry, transA specifies the form of op( A ) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op( A ) = A. - = MagmaTrans: op( A ) = A**T. - = MagmaConjTrans: op( A ) = A**H. @param[in] transB magma_trans_t. On entry, transB specifies the form of op( B ) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op( B ) = B. - = MagmaTrans: op( B ) = B**T. - = MagmaConjTrans: op( B ) = B**H. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix op( dA ) and of the matrix dC. M must be at least zero. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix op( dB ) and the number of columns of the matrix dC. N must be at least zero. @param[in] k INTEGER. On entry, K specifies the number of columns of the matrix op( dA ) and the number of rows of the matrix op( dB ). K must be at least zero. @param[in] alpha REAL On entry, ALPHA specifies the scalar alpha. @param[in] dA REAL array of DIMENSION ( LDA, ka ), where ka is k when transA = MagmaNoTrans, and is m otherwise. Before entry with transA = MagmaNoTrans, the leading m by k part of the array dA must contain the matrix dA, otherwise the leading k by m part of the array dA must contain the matrix dA. @param[in] ldda INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When transA = MagmaNoTrans then LDA must be at least max( 1, m ), otherwise LDA must be at least max( 1, k ). @param[in] dB REAL array of DIMENSION ( LDB, kb ), where kb is n when transB = MagmaNoTrans, and is k otherwise. Before entry with transB = MagmaNoTrans, the leading k by n part of the array dB must contain the matrix dB, otherwise the leading n by k part of the array dB must contain the matrix dB. @param[in] lddb INTEGER. On entry, LDB specifies the first dimension of dB as declared in the calling (sub) program. When transB = MagmaNoTrans then LDB must be at least max( 1, k ), otherwise LDB must be at least max( 1, n ). @param[in] beta REAL. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then dC need not be set on input. @param[in,out] dC REAL array of DIMENSION ( LDC, n ). 
Before entry, the leading m by n part of the array dC must contain the matrix dC, except when beta is zero, in which case dC need not be set on entry. On exit, the array dC is overwritten by the m by n matrix ( alpha*op( dA )*op( dB ) + beta*dC ). @param[in] lddc INTEGER. On entry, LDC specifies the first dimension of dC as declared in the calling (sub) program. LDC must be at least max( 1, m ). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_gemm *******************************************************************************/ extern "C" void magmablas_sgemm( magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magma_int_t k, float alpha, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_const_ptr dB, magma_int_t lddb, float beta, magmaFloat_ptr dC, magma_int_t lddc, magma_queue_t queue ) { magma_int_t info = 0; if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans ) info = -1; else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans ) info = -2; else if ( m < 0 ) info = -3; else if ( n < 0 ) info = -4; else if ( k < 0 ) info = -5; else if ( transA == MagmaNoTrans ? ldda < m : ldda < k ) info = -8; else if ( transB == MagmaNoTrans ? lddb < k : lddb < n ) info = -10; else if ( lddc < m ) info = -13; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( m <= 0 || n <= 0 || k <= 0 ) return; size_t offsetA = 0; size_t offsetB = 0; int TransA = 2, TransB = 2; if ( transA == MagmaTrans ) TransA = 1; else if ( transA == MagmaNoTrans ) TransA = 0; if ( transB == MagmaTrans ) TransB = 1; else if ( transB == MagmaNoTrans ) TransB = 0; magma_int_t Am = ( ! TransA ? m : k); magma_int_t An = (!TransA ? k : m); magma_int_t Bm = ( ! TransB ? k : n); magma_int_t Bn = (!TransB ? 
n : k); size_t sizeA = (size_t) ldda * (An - 1) + Am; size_t sizeB = (size_t) lddb * (Bn - 1) + Bm; size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512); if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE || sizeB >= CUBLAS_MAX_1DBUF_SIZE ) { magma_sgemm( transA, transB, m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc, queue ); return; } #ifdef TEXTURE_1D // Set textures parameters tex_ref_Amagma_s.normalized = false; tex_ref_Amagma_s.filterMode = cudaFilterModePoint; tex_ref_Amagma_s.addressMode[0] = cudaAddressModeClamp; tex_ref_Bmagma_s.normalized = false; tex_ref_Bmagma_s.filterMode = cudaFilterModePoint; tex_ref_Bmagma_s.addressMode[0] = cudaAddressModeClamp; // Bind A and B to texture references cudaError_t err; err = cudaBindTexture(&offsetA, tex_ref_Amagma_s, dA, sizeA*sizeof(float)); if ( err != cudaSuccess ) { fprintf( stderr, "cannot bind A to texture: %s (%d)\n", cudaGetErrorString(err), err ); return; } err = cudaBindTexture(&offsetB, tex_ref_Bmagma_s, dB, sizeB*sizeof(float)); if ( err != cudaSuccess ) { fprintf( stderr, "cannot bind B to texture: %s (%d)\n", cudaGetErrorString(err), err ); cudaUnbindTexture( tex_ref_Amagma_s ); return; } #endif // Set up grids dim3 dimBlock(DIM_X, DIM_Y); offsetA = offsetA/sizeof(dA[0]); offsetB = offsetB/sizeof(dB[0]); if ( TransA == 0 && TransB == 0 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_nn ), magma_ceildiv( n, BLK_N_nn ) ); sgemm_kernel_fermi_nn<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 1 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_nt ), magma_ceildiv( n, BLK_N_nt ) ); sgemm_kernel_fermi_nt<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 2 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_nc ), magma_ceildiv( n, BLK_N_nc ) ); sgemm_kernel_fermi_nc<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 0 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_tn ), magma_ceildiv( n, BLK_N_tn ) ); sgemm_kernel_fermi_tn<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 1 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_tt ), magma_ceildiv( n, BLK_N_tt ) ); sgemm_kernel_fermi_tt<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 2 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_tc ), magma_ceildiv( n, BLK_N_tc ) ); sgemm_kernel_fermi_tc<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 0 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_cn ), magma_ceildiv( n, BLK_N_cn ) ); sgemm_kernel_fermi_cn<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 1 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_ct ), magma_ceildiv( n, BLK_N_ct ) ); sgemm_kernel_fermi_ct<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 2 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_cc ), 
magma_ceildiv( n, BLK_N_cc ) ); sgemm_kernel_fermi_cc<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } #ifdef TEXTURE_1D cudaUnbindTexture( tex_ref_Amagma_s ); cudaUnbindTexture( tex_ref_Bmagma_s ); #endif }
80712a13e00382da2bb97b5762eadc2ac3fb1875.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void hello_kernel()
{
    printf("hello world from CUDA thread %d\n", int(threadIdx.x));
}

int main(void)
{
    hipLaunchKernelGGL(hello_kernel, dim3(1), dim3(32), 0, 0);
    hipDeviceSynchronize();
    return 0;
}
80712a13e00382da2bb97b5762eadc2ac3fb1875.cu
#include <stdio.h>

__global__ void hello_kernel()
{
    printf("hello world from CUDA thread %d\n", int(threadIdx.x));
}

int main(void)
{
    hello_kernel<<<1, 32>>>();
    cudaDeviceSynchronize();
    return 0;
}
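/* The hello-world pair above launches the kernel and synchronizes without inspecting any
   error state. Below is a minimal sketch of the usual launch-check pattern using the
   standard CUDA runtime calls cudaGetLastError and cudaDeviceSynchronize;
   hello_checked_kernel is a hypothetical name used only for this example. */
#include <stdio.h>
#include <cuda_runtime.h>

__global__ void hello_checked_kernel()
{
    printf("hello world from CUDA thread %d\n", int(threadIdx.x));
}

int main(void)
{
    hello_checked_kernel<<<1, 32>>>();
    cudaError_t err = cudaGetLastError();      // catches launch-configuration errors
    if (err != cudaSuccess)
        fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(err));
    err = cudaDeviceSynchronize();             // catches errors raised during execution
    if (err != cudaSuccess)
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
    return err == cudaSuccess ? 0 : 1;
}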
ec832685d73844e851536badfbe88569a24ab584.hip
// !!! This is a file automatically generated by hipify!!! #include <chrono> #include <iomanip> #include <iostream> #include <fstream> #include <string> #include <utility> #include <vector> #include <hip/hip_runtime.h> #include <hipfft.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <thrust/host_vector.h> #include <thrust/sequence.h> #include <thrust/transform.h> #include "filterbank.hpp" #include "errors.hpp" using std::cerr; using std::cout; using std::endl; using std::ifstream; using std::ofstream; using std::string; using std::vector; #define DEBUG 0 #define GPURUN 1 #define NACCUMULATE 4000 #define NPOL 2 #define PERBLOCK 625 #define TIMEAVG 16 #define TIMESCALE 0.125 #define UNPACKFACTOR 4 #define VDIFSIZE 8000 #define FFTOUT 257 #define FFTUSE 256 struct FrameInfo { unsigned int frameno; unsigned int refsecond; unsigned int refepoch; }; struct Timing { float readtime; float scaletime; float filtime; float savetime; float totaltime; float intertime; }; struct FactorFunctor { __host__ __device__ float operator()(float val) { return val != 0 ? 1.0f/val : val; } }; __constant__ unsigned char kMask[] = {0x03, 0x0C, 0x30, 0xC0}; __global__ void UnpackKernel(unsigned char **in, float **out, size_t samples) { // NOTE: Each thread in the block processes 625 incoming bytes int idx = blockIdx.x * blockDim.x * PERBLOCK + threadIdx.x; int tmod = threadIdx.x % 4; // NOTE: Each thread can store one value __shared__ unsigned char incoming[1024]; int outidx = blockIdx.x * blockDim.x * PERBLOCK * 4; for (int isamp = 0; isamp < PERBLOCK; ++isamp) { if (idx < samples) { for (int ipol = 0; ipol < 2; ++ipol) { incoming[threadIdx.x] = in[ipol][idx]; __syncthreads(); int outidx2 = outidx + threadIdx.x; for (int ichunk = 0; ichunk < 4; ++ichunk) { int inidx = threadIdx.x / 4 + ichunk * 256; unsigned char inval = incoming[inidx]; out[ipol][outidx2] = static_cast<float>(static_cast<short>(((inval & kMask[tmod]) >> (2 * tmod)))); outidx2 += 1024; } } } idx += blockDim.x; outidx += blockDim.x * 4; } } // NOTE: Does not do any frequency averaging // NOTE: Outputs only the total intensity and no other Stokes parameters // NOTE: PERBLOCK is the number of output samples per block __global__ void DetectKernel(hipfftComplex** __restrict__ in, float* __restrict__ out) { int outidx = blockIdx.x * PERBLOCK * FFTUSE + FFTUSE - threadIdx.x - 1; int inidx = blockIdx.x * PERBLOCK * TIMEAVG * FFTOUT + threadIdx.x + 1; float outvalue = 0.0f; hipfftComplex polval; for (int isamp = 0; isamp < PERBLOCK; ++isamp) { // NOTE: Read the data from the incoming array for (int ipol = 0; ipol < 2; ++ipol) { for (int iavg = 0; iavg < TIMEAVG; ++iavg) { polval = in[ipol][inidx + iavg * FFTOUT]; outvalue += polval.x * polval.x + polval.y * polval.y; } } outvalue *= TIMESCALE; out[outidx] = outvalue; inidx += FFTOUT * TIMEAVG; outidx += FFTUSE; outvalue = 0.0; } } __global__ void DetectScaleKernel(hipfftComplex** __restrict__ in, unsigned char* __restrict__ out, float* __restrict__ means, float* __restrict__ stdevs) { int outidx = blockIdx.x * PERBLOCK * FFTUSE + FFTUSE - threadIdx.x - 1; int inidx = blockIdx.x * PERBLOCK * TIMEAVG * FFTOUT + threadIdx.x + 1; float outvalue = 0.0f; hipfftComplex polval; int scaled = 0; for (int isamp = 0; isamp < PERBLOCK; ++isamp) { // NOTE: Read the data from the incoming array for (int ipol = 0; ipol < 2; ++ipol) { for (int iavg = 0; iavg < TIMEAVG; ++iavg) { polval = in[ipol][inidx + iavg * FFTOUT]; outvalue += polval.x * polval.x + polval.y * polval.y; } } outvalue *= TIMESCALE; 
scaled = __float2int_ru((outvalue - means[FFTUSE - threadIdx.x - 1]) / stdevs[FFTUSE - threadIdx.x - 1] * 32.0f + 128.0f); if (scaled > 255) { scaled = 255; } else if (scaled < 0) { scaled = 0; } out[outidx] = (unsigned char)scaled; inidx += FFTOUT * TIMEAVG; outidx += FFTUSE; outvalue = 0.0; } } __global__ void InitDivFactors(float *factors, size_t togenerate) { int idx = blockIdx.x * blockDim.x + threadIdx.x; // NOTE: I don't want to be dividing by 0 // NOTE: idx of 0 will not be used anyway if (idx < togenerate) { if (idx != 0) { factors[idx] = 1.0f / idx; } else { factors[idx] = idx; } } } __global__ void GetScalingFactorsKernel(float* __restrict__ indata, float *base, float *stdev, float *factors, int processed) { // NOTE: Filterbank file format coming in //float mean = indata[threadIdx.x]; float mean = 0.0f; // NOTE: Depending whether I save STD or VAR at the end of every run // float estd = stdev[threadIdx.x]; float estd = stdev[threadIdx.x] * stdev[threadIdx.x] * (processed - 1.0f); float oldmean = base[threadIdx.x]; //float estd = 0.0f; //float oldmean = 0.0; float val = 0.0f; float diff = 0.0; // NOTE: There are 15625 output time samples per NACCUMULATE frames for (int isamp = 0; isamp < 15625; ++isamp) { val = indata[isamp * FFTUSE + threadIdx.x]; diff = val - oldmean; mean = oldmean + diff * factors[processed + isamp + 1]; estd += diff * (val - mean); oldmean = mean; } base[threadIdx.x] = mean; stdev[threadIdx.x] = sqrtf(estd / (float)(processed + 15625 - 1.0f)); // stdev[threadIdx.x] = estd; } int main(int argc, char *argv[]) { string inpola, inpolb, outfil, config; double readsec; bool scaling = false; bool saveinter = false; if ((argc < 5) || (argv[1] == "-h") || (argv[1] == "--help")) { cout << "Incorrect number of arguments!" << endl; cout << "Command line options:" << endl << "-a <filename> - input file for polarisation a" << endl << "-b <filename> - input file for polarisation b" << endl << "-o <filename> - output filterbank file" << endl << "-c <filename> - input configuration file" << endl << "-r <number> - number of seconds to process - CURRENTLY NOT WORKING" << endl << "-s - enable scaling from 32 bits to 8 bits" << endl << "-i - enable saving the intermediate data products" << endl << "-h, --help - display this message" << endl; exit(EXIT_SUCCESS); } for (int iarg = 0; iarg < argc; ++iarg) { if (std::string(argv[iarg]) == "-a") { iarg++; inpola = std::string(argv[iarg]); } else if (std::string(argv[iarg]) == "-b") { iarg++; inpolb = std::string(argv[iarg]); } else if (std::string(argv[iarg]) == "-o") { iarg++; outfil = std::string(argv[iarg]); } else if (std::string(argv[iarg]) == "-c") { iarg++; config = std::string(argv[iarg]); } else if (std::string(argv[iarg]) == "-s") { cout << "Will scale the data to 8 bits" << endl; scaling = true; } else if (std::string(argv[iarg]) == "-i") { cout << "Will save the intermediate products" << endl; saveinter = true; } else if (std::string(argv[iarg]) == "-r") { iarg++; readsec = std::stod(argv[iarg]); } } cout << "Input files: " << inpola << " " << inpolb << endl; FilHead filhead; ReadFilterbankHeader(config, filhead); if (scaling) { filhead.nbits = 8; } // TODO: This will be wrong for R2C FFT filhead.tsamp = 1.0 / (2.0 * filhead.foff) * 2 * FFTUSE * TIMEAVG; // TODO: Make sure it is the middle of the top frequency channel filhead.fch1 = (filhead.fch1 + filhead.foff / 2.0f) * 1e-06; filhead.nchans = FFTUSE; filhead.foff = -1.0 * filhead.foff / FFTUSE * 1e-06 ; filhead.fch1 = filhead.fch1 + filhead.foff / 2.0; if (DEBUG) { cout 
<< "Some header info:\n" << "Raw file: " << filhead.rawfile << endl << "Source name: " << filhead.source << endl << "Azimuth: " << filhead.az << endl << "Zenith angle: " << filhead.za << endl << "Declination: " << filhead.dec << endl << "Right ascension: " << filhead.ra << endl << "Top channel frequency: " << filhead.fch1 << endl << "Channel bandwidth: " << filhead.foff << endl << "Number of channels: " << filhead.nchans << endl << "Sampling time: " << filhead.tsamp << endl << "Bits per sample: " << filhead.nbits << endl; } // TODO: Make sure there are correct values for bandwidth and sampling time in the header after taking averaging into account ifstream filepola(inpola.c_str(), ifstream::in | ifstream::binary); ifstream filepolb(inpolb.c_str(), ifstream::in | ifstream::binary); ofstream filfile(outfil.c_str(), ofstream::out | ofstream::binary); if (!filepola || !filepolb) { if (!filepola) { cout << "Could not open file " << inpola << endl; } if (!filepolb) { cout << "Could not open file " << inpolb << endl; } exit(EXIT_FAILURE); } // TODO: Can save the filterbank header straight away, after the first header is read unsigned char vdifheadpola[32]; unsigned char vdifheadpolb[32]; filepola.read(reinterpret_cast<char*>(vdifheadpola), 32); filepolb.read(reinterpret_cast<char*>(vdifheadpolb), 32); filepola.seekg(0, filepola.end); long long filelengtha = filepola.tellg(); filepola.seekg(0, filepola.beg); filepolb.seekg(0, filepolb.end); long long filelengthb = filepolb.tellg(); filepolb.seekg(0, filepolb.beg); unsigned int startframe; unsigned int startsecond; cout << filelengtha << " " << filelengthb << endl; startframe = (unsigned int)(vdifheadpola[4] | (vdifheadpola[5] << 8) | (vdifheadpola[6] << 16)); // frame number in this second startsecond = (unsigned int)(vdifheadpola[0] | (vdifheadpola[1] << 8) | (vdifheadpola[2] << 16) | ((vdifheadpola[3] & 0x3f) << 24)); if (DEBUG) { cout << "Starting time: " << startsecond << ":" << startframe << endl; } // NOTE: Need to read headers in unsigned int toread = NACCUMULATE * 8000; // NOTE: No more headers after unpacking unsigned int unpackedsize = NACCUMULATE * VDIFSIZE * UNPACKFACTOR; unsigned int fftedsize = unpackedsize / (2 * FFTUSE) * FFTOUT; unsigned int powersize = unpackedsize / (2 * FFTUSE) * FFTUSE / TIMEAVG; hipfftHandle fftplan; int fftsizes[1]; fftsizes[0] = 2 * FFTUSE; int fftbatchsize = unpackedsize / fftsizes[0]; cout << fftbatchsize << endl; cufftCheckError(hipfftPlanMany(&fftplan, 1, fftsizes, NULL, 1, FFTUSE, NULL, 1, FFTUSE, HIPFFT_R2C, fftbatchsize)); unsigned char *tmppola = new unsigned char[toread]; unsigned char *tmppolb = new unsigned char[toread]; unsigned char *devpola; unsigned char *devpolb; unsigned char **datapol = new unsigned char*[NPOL]; unsigned char **devpol; float **unpacked = new float*[NPOL]; float **devunpacked; hipfftComplex **ffted = new hipfftComplex*[NPOL]; hipfftComplex **devffted; unsigned char *devpower; unsigned char *tmppower = new unsigned char[powersize * filhead.nbits / 8]; if (GPURUN) { cudaCheckError(hipMalloc((void**)&devpola, toread * sizeof(unsigned char))); cudaCheckError(hipMalloc((void**)&devpolb, toread * sizeof(unsigned char))); cudaCheckError(hipMalloc((void**)&devpol, NPOL * sizeof(unsigned char*))); cudaCheckError(hipMalloc((void**)&datapol[0], toread * sizeof(unsigned char))); cudaCheckError(hipMalloc((void**)&datapol[1], toread * sizeof(unsigned char))); cudaCheckError(hipMemcpy(devpol, datapol, NPOL * sizeof(unsigned char*), hipMemcpyHostToDevice)); 
cudaCheckError(hipMalloc((void**)&devunpacked, NPOL * sizeof(float*))); cudaCheckError(hipMalloc((void**)&unpacked[0], unpackedsize * sizeof(float))); cudaCheckError(hipMalloc((void**)&unpacked[1], unpackedsize * sizeof(float))); cudaCheckError(hipMemcpy(devunpacked, unpacked, NPOL * sizeof(float*), hipMemcpyHostToDevice)); cudaCheckError(hipMalloc((void**)&devffted, NPOL * sizeof(hipfftComplex*))); cudaCheckError(hipMalloc((void**)&ffted[0], fftedsize * sizeof(hipfftComplex))); cudaCheckError(hipMalloc((void**)&ffted[1], fftedsize * sizeof(hipfftComplex))); cudaCheckError(hipMemcpy(devffted, ffted, NPOL * sizeof(hipfftComplex*), hipMemcpyHostToDevice)); cudaCheckError(hipMalloc((void**)&devpower, powersize * (filhead.nbits / 8))); } vector<std::pair<FrameInfo, FrameInfo>> vdifframes; FrameInfo tmpframea, tmpframeb; int refsecond; int frameno; int epoch; WriteFilterbankHeader(filfile, filhead); Timing runtimes; runtimes.readtime = 0.0f; runtimes.scaletime = 0.0f; runtimes.filtime = 0.0f; runtimes.savetime = 0.0f; runtimes.totaltime = 0.0f; runtimes.intertime = 0.0f; std::chrono::time_point<std::chrono::steady_clock> readstart, readend, scalestart, scaleend, filstart, filend, savestart, saveend, interstart, interend; float *tmpunpackeda = new float[unpackedsize]; float *tmpunpackedb = new float[unpackedsize]; hipfftComplex *tmpffta = new hipfftComplex[fftedsize]; hipfftComplex *tmpfftb = new hipfftComplex[fftedsize]; bool saved = false; //float *dmeans; //float *dstdevs; //cudaCheckError(hipMalloc((void**)&dmeans, FFTUSE * sizeof(float))); //cudaCheckError(hipMalloc((void**)&dstdevs, FFTUSE * sizeof(float))); thrust::device_vector<float> dmeans, dstdevs; dmeans.resize(FFTUSE); dstdevs.resize(FFTUSE); thrust::fill(dmeans.begin(), dmeans.end(), 0.0f); thrust::fill(dstdevs.begin(), dstdevs.end(), 0.0f); float *pdmeans = thrust::raw_pointer_cast(dmeans.data()); float *pdstdevs = thrust::raw_pointer_cast(dstdevs.data()); cout << "Size of the device vectors: " << dmeans.size() << " " << dstdevs.size() << endl; scalestart = std::chrono::steady_clock::now(); // NOTE: Use first 5 accumulates of data to obtain scaling factors if (scaling) { size_t divfactors = 5 * powersize / FFTUSE; thrust::device_vector<float> dfactors; dfactors.resize(divfactors + 1); thrust::sequence(dfactors.begin(), dfactors.end()); thrust::transform(dfactors.begin(), dfactors.end(), dfactors.begin(), FactorFunctor()); float *pdfactors = thrust::raw_pointer_cast(dfactors.data()); //float *dfactors; //size_t divfactors = 5 * powersize / FFTUSE; //cudaCheckError(hipMalloc((void**)&dfactors, divfactors * sizeof(float))); //int scalethreads = 1024; //int scaleblocks = (divfactors - 1) / scalethreads + 1; //cout << "Div factors blocks: " << scaleblocks << " and threads: " << scalethreads << endl; //InitDivFactors<<<scaleblocks, scalethreads>>>(dfactors, divfactors); //cudaCheckError(hipDeviceSynchronize()); //cudaCheckError(hipGetLastError()); size_t processed = 0; float *tmpdpower; cudaCheckError(hipMalloc((void**)&tmpdpower, powersize * sizeof(float))); while((filepola.tellg() < (5 * NACCUMULATE * 8032)) && (filepolb.tellg() < (5 * NACCUMULATE * 8032))) { for (int iacc = 0; iacc < NACCUMULATE; ++iacc) { filepola.read(reinterpret_cast<char*>(vdifheadpola), 32); filepolb.read(reinterpret_cast<char*>(vdifheadpolb), 32); filepola.read(reinterpret_cast<char*>(tmppola) + iacc * 8000, 8000); filepolb.read(reinterpret_cast<char*>(tmppolb) + iacc * 8000, 8000); } cudaCheckError(hipMemcpy(datapol[0], tmppola, NACCUMULATE * 8000 * 
sizeof(unsigned char), hipMemcpyHostToDevice)); cudaCheckError(hipMemcpy(datapol[1], tmppolb, NACCUMULATE * 8000 * sizeof(unsigned char), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( UnpackKernel), dim3(50), dim3(1024), 0, 0, devpol, devunpacked, toread); for (int ipol = 0; ipol < NPOL; ++ipol) { cufftCheckError(hipfftExecR2C(fftplan, unpacked[ipol], ffted[ipol])); } hipLaunchKernelGGL(( DetectKernel), dim3(25), dim3(FFTUSE), 0, 0, devffted, tmpdpower); cudaCheckError(hipDeviceSynchronize()); hipLaunchKernelGGL(( GetScalingFactorsKernel), dim3(1), dim3(FFTUSE), 0, 0, tmpdpower, pdmeans, pdstdevs, pdfactors, processed); processed += (powersize / FFTUSE); cudaCheckError(hipDeviceSynchronize()); } //float *hmeans = new float[FFTUSE]; //float *hstdevs = new float[FFTUSE]; //cudaCheckError(hipMemcpy(hmeans, dmeans, FFTUSE * sizeof(float), hipMemcpyDeviceToHost)); //cudaCheckError(hipMemcpy(hstdevs, dstdevs, FFTUSE * sizeof(float), hipMemcpyDeviceToHost)); thrust::host_vector<float> hmeans = dmeans; thrust::host_vector<float> hstdevs = dstdevs; std::ofstream statsfile("mean_stdev.dat"); cout << "Size of host vector:" << hmeans.size() << endl; if (statsfile) { for (int ichan = 0; ichan < hmeans.size(); ++ichan) { statsfile << hmeans[ichan] << " " << hstdevs[ichan] << endl; } } else { cerr << "Could not open the stats file" << endl; } statsfile.close(); hipFree(tmpdpower); } scaleend = std::chrono::steady_clock::now(); runtimes.scaletime = std::chrono::duration<float>(scaleend - scalestart).count(); filepola.seekg(0, filepola.beg); filepolb.seekg(0, filepolb.beg); std::ofstream unpackedfilea ((outfil + ".unp0").c_str(), std::ios_base::binary); std::ofstream unpackedfileb ((outfil + ".unp1").c_str(), std::ios_base::binary); std::ofstream fftfilea ((outfil + ".fft0").c_str(), std::ios_base::binary); std::ofstream fftfileb ((outfil + ".fft1").c_str(), std::ios_base::binary); while((filepola.tellg() < (filelengtha - NACCUMULATE * 8000)) && (filepolb.tellg() < (filelengthb - NACCUMULATE * 8000))) { //cout << filepola.tellg() << endl; // NOTE: This implementation for (int iacc = 0; iacc < NACCUMULATE; ++iacc) { readstart = std::chrono::steady_clock::now(); filepola.read(reinterpret_cast<char*>(vdifheadpola), 32); filepolb.read(reinterpret_cast<char*>(vdifheadpolb), 32); filepola.read(reinterpret_cast<char*>(tmppola) + iacc * 8000, 8000); filepolb.read(reinterpret_cast<char*>(tmppolb) + iacc * 8000, 8000); readend = std::chrono::steady_clock::now(); runtimes.readtime += std::chrono::duration<float>(readend - readstart).count(); refsecond = (unsigned int)(vdifheadpola[0] | (vdifheadpola[1] << 8) | (vdifheadpola[2] << 16) | ((vdifheadpola[3] & 0x3f) << 24)); frameno = (unsigned int)(vdifheadpola[4] | (vdifheadpola[5] << 8) | (vdifheadpola[6] << 16)); epoch = (unsigned int)(vdifheadpola[7] & 0x3f); // frameno += (refsecond - startsecond) * 4000; tmpframea.frameno = frameno; tmpframea.refsecond = refsecond; tmpframea.refepoch = epoch; refsecond = (unsigned int)(vdifheadpolb[0] | (vdifheadpolb[1] << 8) | (vdifheadpolb[2] << 16) | ((vdifheadpolb[3] & 0x3f) << 24)); frameno = (unsigned int)(vdifheadpolb[4] | (vdifheadpolb[5] << 8) | (vdifheadpolb[6] << 16)); epoch = (unsigned int)(vdifheadpolb[7] & 0x3f); // frameno += (refsecond - startsecond) * 4000; tmpframeb.frameno = frameno; tmpframeb.refsecond = refsecond; tmpframeb.refepoch = epoch; vdifframes.push_back(std::make_pair(tmpframea, tmpframeb)); // NOTE: Can use subtract startframe to put frame count at 0 and use that to save into the buffer } if (GPURUN) 
{ filstart = std::chrono::steady_clock::now(); cudaCheckError(hipMemcpy(datapol[0], tmppola, NACCUMULATE * 8000 * sizeof(unsigned char), hipMemcpyHostToDevice)); cudaCheckError(hipMemcpy(datapol[1], tmppolb, NACCUMULATE * 8000 * sizeof(unsigned char), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( UnpackKernel), dim3(50), dim3(1024), 0, 0, devpol, devunpacked, toread); for (int ipol = 0; ipol < NPOL; ++ipol) { cufftCheckError(hipfftExecR2C(fftplan, unpacked[ipol], ffted[ipol])); } if (filhead.nbits == 8) { hipLaunchKernelGGL(( DetectScaleKernel), dim3(25), dim3(FFTUSE), 0, 0, devffted, reinterpret_cast<unsigned char*>(devpower), pdmeans, pdstdevs); } else if (filhead.nbits == 32) { hipLaunchKernelGGL(( DetectKernel), dim3(25), dim3(FFTUSE), 0, 0, devffted, reinterpret_cast<float*>(devpower)); } else { cerr << "Unsupported option! Will use float!" << endl; hipLaunchKernelGGL(( DetectKernel), dim3(25), dim3(FFTUSE), 0, 0, devffted, reinterpret_cast<float*>(devpower)); } //PowerKernel<<<25, FFTUSE, 0, 0>>>(devffted, devpower); cudaCheckError(hipDeviceSynchronize()); cudaCheckError(hipMemcpy(tmppower, devpower, powersize * filhead.nbits / 8, hipMemcpyDeviceToHost)); if (!saved) { std::ofstream unpackedfile("unpacked.dat"); cudaCheckError(hipMemcpy(tmpunpackeda, unpacked[0], 2 * 8000 * 4 * sizeof(float), hipMemcpyDeviceToHost)); cudaCheckError(hipMemcpy(tmpunpackedb, unpacked[1], 2 * 8000 * 4 * sizeof(float), hipMemcpyDeviceToHost)); for (int isamp = 0; isamp < 2 * 8000 * 4; ++isamp) { unpackedfile << tmpunpackeda[isamp] << " " << tmpunpackedb[isamp] << endl; } unpackedfile.close(); saved = true; } filend = std::chrono::steady_clock::now(); runtimes.filtime += std::chrono::duration<float>(filend - filstart).count(); savestart = std::chrono::steady_clock::now(); filfile.write(reinterpret_cast<char*>(tmppower), powersize * filhead.nbits / 8); saveend = std::chrono::steady_clock::now(); runtimes.savetime += std::chrono::duration<float>(saveend - savestart).count(); if (saveinter) { interstart = std::chrono::steady_clock::now(); cudaCheckError(hipMemcpy(tmpunpackeda, unpacked[0], unpackedsize * sizeof(float), hipMemcpyDeviceToHost)); cudaCheckError(hipMemcpy(tmpunpackedb, unpacked[1], unpackedsize * sizeof(float), hipMemcpyDeviceToHost)); /*for (int isamp = 0; isamp < unpackedsize; ++isamp) { unpackedfile << tmpunpackeda[isamp] << " " << tmpunpackedb[isamp] << endl; }*/ unpackedfilea.write(reinterpret_cast<char*>(tmpunpackeda), unpackedsize * sizeof(float)); unpackedfileb.write(reinterpret_cast<char*>(tmpunpackedb), unpackedsize * sizeof(float)); cudaCheckError(hipMemcpy(tmpffta, ffted[0], fftedsize * sizeof(hipfftComplex), hipMemcpyDeviceToHost)); cudaCheckError(hipMemcpy(tmpfftb, ffted[1], fftedsize * sizeof(hipfftComplex), hipMemcpyDeviceToHost)); /*for (int isamp = 0; isamp < fftedsize; ++isamp) { fftfile << tmpffta[isamp].x << " " << tmpffta[isamp].y << " " << tmpfftb[isamp].x << " " << tmpfftb[isamp].y << endl; }*/ fftfilea.write(reinterpret_cast<char*>(tmpffta), fftedsize * sizeof(hipfftComplex)); fftfileb.write(reinterpret_cast<char*>(tmpfftb), fftedsize * sizeof(hipfftComplex)); interend = std::chrono::steady_clock::now(); runtimes.intertime += std::chrono::duration<float>(interend - interstart).count(); } } cout << "Completed " << std::fixed << std::setprecision(2) << (float)filepola.tellg() / (float)(filelengtha - 1.0) * 100.0f << "%\r"; cout.flush(); } cout << endl; filfile.close(); unpackedfilea.close(); unpackedfileb.close(); fftfilea.close(); fftfileb.close(); runtimes.totaltime = 
runtimes.readtime + runtimes.scaletime + runtimes.filtime + runtimes.savetime + runtimes.intertime; cout << "Total execution time: " << runtimes.totaltime << "s\n"; cout << "\tScaling factors: " << runtimes.scaletime << "s\n"; cout << "\tFile read: " << runtimes.readtime << "s\n"; cout << "\tFilterbanking: " << runtimes.filtime << "s\n"; cout << "\tFile write: " << runtimes.savetime << "s\n"; if (saveinter) { cout << "\tIntermediate write: " << runtimes.intertime << "s\n"; } if (DEBUG) { std::ofstream outframes("dataframes.dat"); outframes << "Ref Epoch A\tRef second A\tRef frame A\tRef Epoch B\tRef second B\tRef frame b\n"; for (auto iframe = vdifframes.begin(); iframe != vdifframes.end(); ++iframe) { outframes << iframe->first.refepoch << "\t" << iframe->first.refsecond << "\t" << iframe->first.frameno << "\t" << iframe->second.refepoch << "\t" << iframe->second.refsecond << "\t" << iframe->second.frameno << endl; } outframes.close(); } return 0; }
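/* GetScalingFactorsKernel above keeps a running mean and standard deviation per frequency
   channel using Welford's online update: diff = x - mean; mean += diff / n;
   M2 += diff * (x - mean); stdev = sqrt(M2 / (n - 1)). The kernel multiplies by a
   precomputed 1/n table (the FactorFunctor/thrust::sequence vector) instead of dividing.
   A small single-channel host reference of the same recurrence follows; the function name
   welford_mean_stdev is hypothetical. */
#include <cmath>
#include <cstdio>
#include <vector>

void welford_mean_stdev(const std::vector<float> &samples, float &mean, float &stdev)
{
    mean = 0.0f;
    float m2 = 0.0f;               // running sum of squared differences from the current mean
    std::size_t n = 0;
    for (float val : samples)
    {
        ++n;
        float diff = val - mean;
        mean += diff / n;          // the kernel uses factors[n] == 1/n here
        m2 += diff * (val - mean);
    }
    stdev = (n > 1) ? std::sqrt(m2 / (n - 1)) : 0.0f;
}

int main()
{
    std::vector<float> samples = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f};
    float mean, stdev;
    welford_mean_stdev(samples, mean, stdev);
    printf("mean = %f, stdev = %f\n", mean, stdev);   // expect 3.0 and ~1.58
    return 0;
}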
ec832685d73844e851536badfbe88569a24ab584.cu
#include <chrono> #include <iomanip> #include <iostream> #include <fstream> #include <string> #include <utility> #include <vector> #include <cuda.h> #include <cufft.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <thrust/host_vector.h> #include <thrust/sequence.h> #include <thrust/transform.h> #include "filterbank.hpp" #include "errors.hpp" using std::cerr; using std::cout; using std::endl; using std::ifstream; using std::ofstream; using std::string; using std::vector; #define DEBUG 0 #define GPURUN 1 #define NACCUMULATE 4000 #define NPOL 2 #define PERBLOCK 625 #define TIMEAVG 16 #define TIMESCALE 0.125 #define UNPACKFACTOR 4 #define VDIFSIZE 8000 #define FFTOUT 257 #define FFTUSE 256 struct FrameInfo { unsigned int frameno; unsigned int refsecond; unsigned int refepoch; }; struct Timing { float readtime; float scaletime; float filtime; float savetime; float totaltime; float intertime; }; struct FactorFunctor { __host__ __device__ float operator()(float val) { return val != 0 ? 1.0f/val : val; } }; __constant__ unsigned char kMask[] = {0x03, 0x0C, 0x30, 0xC0}; __global__ void UnpackKernel(unsigned char **in, float **out, size_t samples) { // NOTE: Each thread in the block processes 625 incoming bytes int idx = blockIdx.x * blockDim.x * PERBLOCK + threadIdx.x; int tmod = threadIdx.x % 4; // NOTE: Each thread can store one value __shared__ unsigned char incoming[1024]; int outidx = blockIdx.x * blockDim.x * PERBLOCK * 4; for (int isamp = 0; isamp < PERBLOCK; ++isamp) { if (idx < samples) { for (int ipol = 0; ipol < 2; ++ipol) { incoming[threadIdx.x] = in[ipol][idx]; __syncthreads(); int outidx2 = outidx + threadIdx.x; for (int ichunk = 0; ichunk < 4; ++ichunk) { int inidx = threadIdx.x / 4 + ichunk * 256; unsigned char inval = incoming[inidx]; out[ipol][outidx2] = static_cast<float>(static_cast<short>(((inval & kMask[tmod]) >> (2 * tmod)))); outidx2 += 1024; } } } idx += blockDim.x; outidx += blockDim.x * 4; } } // NOTE: Does not do any frequency averaging // NOTE: Outputs only the total intensity and no other Stokes parameters // NOTE: PERBLOCK is the number of output samples per block __global__ void DetectKernel(cufftComplex** __restrict__ in, float* __restrict__ out) { int outidx = blockIdx.x * PERBLOCK * FFTUSE + FFTUSE - threadIdx.x - 1; int inidx = blockIdx.x * PERBLOCK * TIMEAVG * FFTOUT + threadIdx.x + 1; float outvalue = 0.0f; cufftComplex polval; for (int isamp = 0; isamp < PERBLOCK; ++isamp) { // NOTE: Read the data from the incoming array for (int ipol = 0; ipol < 2; ++ipol) { for (int iavg = 0; iavg < TIMEAVG; ++iavg) { polval = in[ipol][inidx + iavg * FFTOUT]; outvalue += polval.x * polval.x + polval.y * polval.y; } } outvalue *= TIMESCALE; out[outidx] = outvalue; inidx += FFTOUT * TIMEAVG; outidx += FFTUSE; outvalue = 0.0; } } __global__ void DetectScaleKernel(cufftComplex** __restrict__ in, unsigned char* __restrict__ out, float* __restrict__ means, float* __restrict__ stdevs) { int outidx = blockIdx.x * PERBLOCK * FFTUSE + FFTUSE - threadIdx.x - 1; int inidx = blockIdx.x * PERBLOCK * TIMEAVG * FFTOUT + threadIdx.x + 1; float outvalue = 0.0f; cufftComplex polval; int scaled = 0; for (int isamp = 0; isamp < PERBLOCK; ++isamp) { // NOTE: Read the data from the incoming array for (int ipol = 0; ipol < 2; ++ipol) { for (int iavg = 0; iavg < TIMEAVG; ++iavg) { polval = in[ipol][inidx + iavg * FFTOUT]; outvalue += polval.x * polval.x + polval.y * polval.y; } } outvalue *= TIMESCALE; scaled = __float2int_ru((outvalue - means[FFTUSE - threadIdx.x - 1]) / 
stdevs[FFTUSE - threadIdx.x - 1] * 32.0f + 128.0f); if (scaled > 255) { scaled = 255; } else if (scaled < 0) { scaled = 0; } out[outidx] = (unsigned char)scaled; inidx += FFTOUT * TIMEAVG; outidx += FFTUSE; outvalue = 0.0; } } __global__ void InitDivFactors(float *factors, size_t togenerate) { int idx = blockIdx.x * blockDim.x + threadIdx.x; // NOTE: I don't want to be dividing by 0 // NOTE: idx of 0 will not be used anyway if (idx < togenerate) { if (idx != 0) { factors[idx] = 1.0f / idx; } else { factors[idx] = idx; } } } __global__ void GetScalingFactorsKernel(float* __restrict__ indata, float *base, float *stdev, float *factors, int processed) { // NOTE: Filterbank file format coming in //float mean = indata[threadIdx.x]; float mean = 0.0f; // NOTE: Depending whether I save STD or VAR at the end of every run // float estd = stdev[threadIdx.x]; float estd = stdev[threadIdx.x] * stdev[threadIdx.x] * (processed - 1.0f); float oldmean = base[threadIdx.x]; //float estd = 0.0f; //float oldmean = 0.0; float val = 0.0f; float diff = 0.0; // NOTE: There are 15625 output time samples per NACCUMULATE frames for (int isamp = 0; isamp < 15625; ++isamp) { val = indata[isamp * FFTUSE + threadIdx.x]; diff = val - oldmean; mean = oldmean + diff * factors[processed + isamp + 1]; estd += diff * (val - mean); oldmean = mean; } base[threadIdx.x] = mean; stdev[threadIdx.x] = sqrtf(estd / (float)(processed + 15625 - 1.0f)); // stdev[threadIdx.x] = estd; } int main(int argc, char *argv[]) { string inpola, inpolb, outfil, config; double readsec; bool scaling = false; bool saveinter = false; if ((argc < 5) || (argv[1] == "-h") || (argv[1] == "--help")) { cout << "Incorrect number of arguments!" << endl; cout << "Command line options:" << endl << "-a <filename> - input file for polarisation a" << endl << "-b <filename> - input file for polarisation b" << endl << "-o <filename> - output filterbank file" << endl << "-c <filename> - input configuration file" << endl << "-r <number> - number of seconds to process - CURRENTLY NOT WORKING" << endl << "-s - enable scaling from 32 bits to 8 bits" << endl << "-i - enable saving the intermediate data products" << endl << "-h, --help - display this message" << endl; exit(EXIT_SUCCESS); } for (int iarg = 0; iarg < argc; ++iarg) { if (std::string(argv[iarg]) == "-a") { iarg++; inpola = std::string(argv[iarg]); } else if (std::string(argv[iarg]) == "-b") { iarg++; inpolb = std::string(argv[iarg]); } else if (std::string(argv[iarg]) == "-o") { iarg++; outfil = std::string(argv[iarg]); } else if (std::string(argv[iarg]) == "-c") { iarg++; config = std::string(argv[iarg]); } else if (std::string(argv[iarg]) == "-s") { cout << "Will scale the data to 8 bits" << endl; scaling = true; } else if (std::string(argv[iarg]) == "-i") { cout << "Will save the intermediate products" << endl; saveinter = true; } else if (std::string(argv[iarg]) == "-r") { iarg++; readsec = std::stod(argv[iarg]); } } cout << "Input files: " << inpola << " " << inpolb << endl; FilHead filhead; ReadFilterbankHeader(config, filhead); if (scaling) { filhead.nbits = 8; } // TODO: This will be wrong for R2C FFT filhead.tsamp = 1.0 / (2.0 * filhead.foff) * 2 * FFTUSE * TIMEAVG; // TODO: Make sure it is the middle of the top frequency channel filhead.fch1 = (filhead.fch1 + filhead.foff / 2.0f) * 1e-06; filhead.nchans = FFTUSE; filhead.foff = -1.0 * filhead.foff / FFTUSE * 1e-06 ; filhead.fch1 = filhead.fch1 + filhead.foff / 2.0; if (DEBUG) { cout << "Some header info:\n" << "Raw file: " << filhead.rawfile << endl << 
"Source name: " << filhead.source << endl << "Azimuth: " << filhead.az << endl << "Zenith angle: " << filhead.za << endl << "Declination: " << filhead.dec << endl << "Right ascension: " << filhead.ra << endl << "Top channel frequency: " << filhead.fch1 << endl << "Channel bandwidth: " << filhead.foff << endl << "Number of channels: " << filhead.nchans << endl << "Sampling time: " << filhead.tsamp << endl << "Bits per sample: " << filhead.nbits << endl; } // TODO: Make sure there are correct values for bandwidth and sampling time in the header after taking averaging into account ifstream filepola(inpola.c_str(), ifstream::in | ifstream::binary); ifstream filepolb(inpolb.c_str(), ifstream::in | ifstream::binary); ofstream filfile(outfil.c_str(), ofstream::out | ofstream::binary); if (!filepola || !filepolb) { if (!filepola) { cout << "Could not open file " << inpola << endl; } if (!filepolb) { cout << "Could not open file " << inpolb << endl; } exit(EXIT_FAILURE); } // TODO: Can save the filterbank header straight away, after the first header is read unsigned char vdifheadpola[32]; unsigned char vdifheadpolb[32]; filepola.read(reinterpret_cast<char*>(vdifheadpola), 32); filepolb.read(reinterpret_cast<char*>(vdifheadpolb), 32); filepola.seekg(0, filepola.end); long long filelengtha = filepola.tellg(); filepola.seekg(0, filepola.beg); filepolb.seekg(0, filepolb.end); long long filelengthb = filepolb.tellg(); filepolb.seekg(0, filepolb.beg); unsigned int startframe; unsigned int startsecond; cout << filelengtha << " " << filelengthb << endl; startframe = (unsigned int)(vdifheadpola[4] | (vdifheadpola[5] << 8) | (vdifheadpola[6] << 16)); // frame number in this second startsecond = (unsigned int)(vdifheadpola[0] | (vdifheadpola[1] << 8) | (vdifheadpola[2] << 16) | ((vdifheadpola[3] & 0x3f) << 24)); if (DEBUG) { cout << "Starting time: " << startsecond << ":" << startframe << endl; } // NOTE: Need to read headers in unsigned int toread = NACCUMULATE * 8000; // NOTE: No more headers after unpacking unsigned int unpackedsize = NACCUMULATE * VDIFSIZE * UNPACKFACTOR; unsigned int fftedsize = unpackedsize / (2 * FFTUSE) * FFTOUT; unsigned int powersize = unpackedsize / (2 * FFTUSE) * FFTUSE / TIMEAVG; cufftHandle fftplan; int fftsizes[1]; fftsizes[0] = 2 * FFTUSE; int fftbatchsize = unpackedsize / fftsizes[0]; cout << fftbatchsize << endl; cufftCheckError(cufftPlanMany(&fftplan, 1, fftsizes, NULL, 1, FFTUSE, NULL, 1, FFTUSE, CUFFT_R2C, fftbatchsize)); unsigned char *tmppola = new unsigned char[toread]; unsigned char *tmppolb = new unsigned char[toread]; unsigned char *devpola; unsigned char *devpolb; unsigned char **datapol = new unsigned char*[NPOL]; unsigned char **devpol; float **unpacked = new float*[NPOL]; float **devunpacked; cufftComplex **ffted = new cufftComplex*[NPOL]; cufftComplex **devffted; unsigned char *devpower; unsigned char *tmppower = new unsigned char[powersize * filhead.nbits / 8]; if (GPURUN) { cudaCheckError(cudaMalloc((void**)&devpola, toread * sizeof(unsigned char))); cudaCheckError(cudaMalloc((void**)&devpolb, toread * sizeof(unsigned char))); cudaCheckError(cudaMalloc((void**)&devpol, NPOL * sizeof(unsigned char*))); cudaCheckError(cudaMalloc((void**)&datapol[0], toread * sizeof(unsigned char))); cudaCheckError(cudaMalloc((void**)&datapol[1], toread * sizeof(unsigned char))); cudaCheckError(cudaMemcpy(devpol, datapol, NPOL * sizeof(unsigned char*), cudaMemcpyHostToDevice)); cudaCheckError(cudaMalloc((void**)&devunpacked, NPOL * sizeof(float*))); 
cudaCheckError(cudaMalloc((void**)&unpacked[0], unpackedsize * sizeof(float))); cudaCheckError(cudaMalloc((void**)&unpacked[1], unpackedsize * sizeof(float))); cudaCheckError(cudaMemcpy(devunpacked, unpacked, NPOL * sizeof(float*), cudaMemcpyHostToDevice)); cudaCheckError(cudaMalloc((void**)&devffted, NPOL * sizeof(cufftComplex*))); cudaCheckError(cudaMalloc((void**)&ffted[0], fftedsize * sizeof(cufftComplex))); cudaCheckError(cudaMalloc((void**)&ffted[1], fftedsize * sizeof(cufftComplex))); cudaCheckError(cudaMemcpy(devffted, ffted, NPOL * sizeof(cufftComplex*), cudaMemcpyHostToDevice)); cudaCheckError(cudaMalloc((void**)&devpower, powersize * (filhead.nbits / 8))); } vector<std::pair<FrameInfo, FrameInfo>> vdifframes; FrameInfo tmpframea, tmpframeb; int refsecond; int frameno; int epoch; WriteFilterbankHeader(filfile, filhead); Timing runtimes; runtimes.readtime = 0.0f; runtimes.scaletime = 0.0f; runtimes.filtime = 0.0f; runtimes.savetime = 0.0f; runtimes.totaltime = 0.0f; runtimes.intertime = 0.0f; std::chrono::time_point<std::chrono::steady_clock> readstart, readend, scalestart, scaleend, filstart, filend, savestart, saveend, interstart, interend; float *tmpunpackeda = new float[unpackedsize]; float *tmpunpackedb = new float[unpackedsize]; cufftComplex *tmpffta = new cufftComplex[fftedsize]; cufftComplex *tmpfftb = new cufftComplex[fftedsize]; bool saved = false; //float *dmeans; //float *dstdevs; //cudaCheckError(cudaMalloc((void**)&dmeans, FFTUSE * sizeof(float))); //cudaCheckError(cudaMalloc((void**)&dstdevs, FFTUSE * sizeof(float))); thrust::device_vector<float> dmeans, dstdevs; dmeans.resize(FFTUSE); dstdevs.resize(FFTUSE); thrust::fill(dmeans.begin(), dmeans.end(), 0.0f); thrust::fill(dstdevs.begin(), dstdevs.end(), 0.0f); float *pdmeans = thrust::raw_pointer_cast(dmeans.data()); float *pdstdevs = thrust::raw_pointer_cast(dstdevs.data()); cout << "Size of the device vectors: " << dmeans.size() << " " << dstdevs.size() << endl; scalestart = std::chrono::steady_clock::now(); // NOTE: Use first 5 accumulates of data to obtain scaling factors if (scaling) { size_t divfactors = 5 * powersize / FFTUSE; thrust::device_vector<float> dfactors; dfactors.resize(divfactors + 1); thrust::sequence(dfactors.begin(), dfactors.end()); thrust::transform(dfactors.begin(), dfactors.end(), dfactors.begin(), FactorFunctor()); float *pdfactors = thrust::raw_pointer_cast(dfactors.data()); //float *dfactors; //size_t divfactors = 5 * powersize / FFTUSE; //cudaCheckError(cudaMalloc((void**)&dfactors, divfactors * sizeof(float))); //int scalethreads = 1024; //int scaleblocks = (divfactors - 1) / scalethreads + 1; //cout << "Div factors blocks: " << scaleblocks << " and threads: " << scalethreads << endl; //InitDivFactors<<<scaleblocks, scalethreads>>>(dfactors, divfactors); //cudaCheckError(cudaDeviceSynchronize()); //cudaCheckError(cudaGetLastError()); size_t processed = 0; float *tmpdpower; cudaCheckError(cudaMalloc((void**)&tmpdpower, powersize * sizeof(float))); while((filepola.tellg() < (5 * NACCUMULATE * 8032)) && (filepolb.tellg() < (5 * NACCUMULATE * 8032))) { for (int iacc = 0; iacc < NACCUMULATE; ++iacc) { filepola.read(reinterpret_cast<char*>(vdifheadpola), 32); filepolb.read(reinterpret_cast<char*>(vdifheadpolb), 32); filepola.read(reinterpret_cast<char*>(tmppola) + iacc * 8000, 8000); filepolb.read(reinterpret_cast<char*>(tmppolb) + iacc * 8000, 8000); } cudaCheckError(cudaMemcpy(datapol[0], tmppola, NACCUMULATE * 8000 * sizeof(unsigned char), cudaMemcpyHostToDevice)); 
cudaCheckError(cudaMemcpy(datapol[1], tmppolb, NACCUMULATE * 8000 * sizeof(unsigned char), cudaMemcpyHostToDevice)); UnpackKernel<<<50, 1024, 0, 0>>>(devpol, devunpacked, toread); for (int ipol = 0; ipol < NPOL; ++ipol) { cufftCheckError(cufftExecR2C(fftplan, unpacked[ipol], ffted[ipol])); } DetectKernel<<<25, FFTUSE, 0, 0>>>(devffted, tmpdpower); cudaCheckError(cudaDeviceSynchronize()); GetScalingFactorsKernel<<<1, FFTUSE, 0, 0>>>(tmpdpower, pdmeans, pdstdevs, pdfactors, processed); processed += (powersize / FFTUSE); cudaCheckError(cudaDeviceSynchronize()); } //float *hmeans = new float[FFTUSE]; //float *hstdevs = new float[FFTUSE]; //cudaCheckError(cudaMemcpy(hmeans, dmeans, FFTUSE * sizeof(float), cudaMemcpyDeviceToHost)); //cudaCheckError(cudaMemcpy(hstdevs, dstdevs, FFTUSE * sizeof(float), cudaMemcpyDeviceToHost)); thrust::host_vector<float> hmeans = dmeans; thrust::host_vector<float> hstdevs = dstdevs; std::ofstream statsfile("mean_stdev.dat"); cout << "Size of host vector:" << hmeans.size() << endl; if (statsfile) { for (int ichan = 0; ichan < hmeans.size(); ++ichan) { statsfile << hmeans[ichan] << " " << hstdevs[ichan] << endl; } } else { cerr << "Could not open the stats file" << endl; } statsfile.close(); cudaFree(tmpdpower); } scaleend = std::chrono::steady_clock::now(); runtimes.scaletime = std::chrono::duration<float>(scaleend - scalestart).count(); filepola.seekg(0, filepola.beg); filepolb.seekg(0, filepolb.beg); std::ofstream unpackedfilea ((outfil + ".unp0").c_str(), std::ios_base::binary); std::ofstream unpackedfileb ((outfil + ".unp1").c_str(), std::ios_base::binary); std::ofstream fftfilea ((outfil + ".fft0").c_str(), std::ios_base::binary); std::ofstream fftfileb ((outfil + ".fft1").c_str(), std::ios_base::binary); while((filepola.tellg() < (filelengtha - NACCUMULATE * 8000)) && (filepolb.tellg() < (filelengthb - NACCUMULATE * 8000))) { //cout << filepola.tellg() << endl; // NOTE: This implementation for (int iacc = 0; iacc < NACCUMULATE; ++iacc) { readstart = std::chrono::steady_clock::now(); filepola.read(reinterpret_cast<char*>(vdifheadpola), 32); filepolb.read(reinterpret_cast<char*>(vdifheadpolb), 32); filepola.read(reinterpret_cast<char*>(tmppola) + iacc * 8000, 8000); filepolb.read(reinterpret_cast<char*>(tmppolb) + iacc * 8000, 8000); readend = std::chrono::steady_clock::now(); runtimes.readtime += std::chrono::duration<float>(readend - readstart).count(); refsecond = (unsigned int)(vdifheadpola[0] | (vdifheadpola[1] << 8) | (vdifheadpola[2] << 16) | ((vdifheadpola[3] & 0x3f) << 24)); frameno = (unsigned int)(vdifheadpola[4] | (vdifheadpola[5] << 8) | (vdifheadpola[6] << 16)); epoch = (unsigned int)(vdifheadpola[7] & 0x3f); // frameno += (refsecond - startsecond) * 4000; tmpframea.frameno = frameno; tmpframea.refsecond = refsecond; tmpframea.refepoch = epoch; refsecond = (unsigned int)(vdifheadpolb[0] | (vdifheadpolb[1] << 8) | (vdifheadpolb[2] << 16) | ((vdifheadpolb[3] & 0x3f) << 24)); frameno = (unsigned int)(vdifheadpolb[4] | (vdifheadpolb[5] << 8) | (vdifheadpolb[6] << 16)); epoch = (unsigned int)(vdifheadpolb[7] & 0x3f); // frameno += (refsecond - startsecond) * 4000; tmpframeb.frameno = frameno; tmpframeb.refsecond = refsecond; tmpframeb.refepoch = epoch; vdifframes.push_back(std::make_pair(tmpframea, tmpframeb)); // NOTE: Can use subtract startframe to put frame count at 0 and use that to save into the buffer } if (GPURUN) { filstart = std::chrono::steady_clock::now(); cudaCheckError(cudaMemcpy(datapol[0], tmppola, NACCUMULATE * 8000 * sizeof(unsigned 
char), cudaMemcpyHostToDevice)); cudaCheckError(cudaMemcpy(datapol[1], tmppolb, NACCUMULATE * 8000 * sizeof(unsigned char), cudaMemcpyHostToDevice)); UnpackKernel<<<50, 1024, 0, 0>>>(devpol, devunpacked, toread); for (int ipol = 0; ipol < NPOL; ++ipol) { cufftCheckError(cufftExecR2C(fftplan, unpacked[ipol], ffted[ipol])); } if (filhead.nbits == 8) { DetectScaleKernel<<<25, FFTUSE, 0, 0>>>(devffted, reinterpret_cast<unsigned char*>(devpower), pdmeans, pdstdevs); } else if (filhead.nbits == 32) { DetectKernel<<<25, FFTUSE, 0, 0>>>(devffted, reinterpret_cast<float*>(devpower)); } else { cerr << "Unsupported option! Will use float!" << endl; DetectKernel<<<25, FFTUSE, 0, 0>>>(devffted, reinterpret_cast<float*>(devpower)); } //PowerKernel<<<25, FFTUSE, 0, 0>>>(devffted, devpower); cudaCheckError(cudaDeviceSynchronize()); cudaCheckError(cudaMemcpy(tmppower, devpower, powersize * filhead.nbits / 8, cudaMemcpyDeviceToHost)); if (!saved) { std::ofstream unpackedfile("unpacked.dat"); cudaCheckError(cudaMemcpy(tmpunpackeda, unpacked[0], 2 * 8000 * 4 * sizeof(float), cudaMemcpyDeviceToHost)); cudaCheckError(cudaMemcpy(tmpunpackedb, unpacked[1], 2 * 8000 * 4 * sizeof(float), cudaMemcpyDeviceToHost)); for (int isamp = 0; isamp < 2 * 8000 * 4; ++isamp) { unpackedfile << tmpunpackeda[isamp] << " " << tmpunpackedb[isamp] << endl; } unpackedfile.close(); saved = true; } filend = std::chrono::steady_clock::now(); runtimes.filtime += std::chrono::duration<float>(filend - filstart).count(); savestart = std::chrono::steady_clock::now(); filfile.write(reinterpret_cast<char*>(tmppower), powersize * filhead.nbits / 8); saveend = std::chrono::steady_clock::now(); runtimes.savetime += std::chrono::duration<float>(saveend - savestart).count(); if (saveinter) { interstart = std::chrono::steady_clock::now(); cudaCheckError(cudaMemcpy(tmpunpackeda, unpacked[0], unpackedsize * sizeof(float), cudaMemcpyDeviceToHost)); cudaCheckError(cudaMemcpy(tmpunpackedb, unpacked[1], unpackedsize * sizeof(float), cudaMemcpyDeviceToHost)); /*for (int isamp = 0; isamp < unpackedsize; ++isamp) { unpackedfile << tmpunpackeda[isamp] << " " << tmpunpackedb[isamp] << endl; }*/ unpackedfilea.write(reinterpret_cast<char*>(tmpunpackeda), unpackedsize * sizeof(float)); unpackedfileb.write(reinterpret_cast<char*>(tmpunpackedb), unpackedsize * sizeof(float)); cudaCheckError(cudaMemcpy(tmpffta, ffted[0], fftedsize * sizeof(cufftComplex), cudaMemcpyDeviceToHost)); cudaCheckError(cudaMemcpy(tmpfftb, ffted[1], fftedsize * sizeof(cufftComplex), cudaMemcpyDeviceToHost)); /*for (int isamp = 0; isamp < fftedsize; ++isamp) { fftfile << tmpffta[isamp].x << " " << tmpffta[isamp].y << " " << tmpfftb[isamp].x << " " << tmpfftb[isamp].y << endl; }*/ fftfilea.write(reinterpret_cast<char*>(tmpffta), fftedsize * sizeof(cufftComplex)); fftfileb.write(reinterpret_cast<char*>(tmpfftb), fftedsize * sizeof(cufftComplex)); interend = std::chrono::steady_clock::now(); runtimes.intertime += std::chrono::duration<float>(interend - interstart).count(); } } cout << "Completed " << std::fixed << std::setprecision(2) << (float)filepola.tellg() / (float)(filelengtha - 1.0) * 100.0f << "%\r"; cout.flush(); } cout << endl; filfile.close(); unpackedfilea.close(); unpackedfileb.close(); fftfilea.close(); fftfileb.close(); runtimes.totaltime = runtimes.readtime + runtimes.scaletime + runtimes.filtime + runtimes.savetime + runtimes.intertime; cout << "Total execution time: " << runtimes.totaltime << "s\n"; cout << "\tScaling factors: " << runtimes.scaletime << "s\n"; cout << "\tFile 
read: " << runtimes.readtime << "s\n"; cout << "\tFilterbanking: " << runtimes.filtime << "s\n"; cout << "\tFile write: " << runtimes.savetime << "s\n"; if (saveinter) { cout << "\tIntermediate write: " << runtimes.intertime << "s\n"; } if (DEBUG) { std::ofstream outframes("dataframes.dat"); outframes << "Ref Epoch A\tRef second A\tRef frame A\tRef Epoch B\tRef second B\tRef frame b\n"; for (auto iframe = vdifframes.begin(); iframe != vdifframes.end(); ++iframe) { outframes << iframe->first.refepoch << "\t" << iframe->first.refsecond << "\t" << iframe->first.frameno << "\t" << iframe->second.refepoch << "\t" << iframe->second.refsecond << "\t" << iframe->second.frameno << endl; } outframes.close(); } return 0; }
dd148014f32fd84d08189af93a5a153cb3d5bb59.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #include "timing.h" #include "util.h" // this method is cited from CMU's assignment description // http://15418.courses.cs.cmu.edu/spring2015/article/4 __global__ void buildUpTree(int* device_output, int len, int starter, int interval) { int index = (blockIdx.x * blockDim.x + threadIdx.x) * interval; // return if out of range if (index + interval > len) { return; } device_output[index + interval - 1] += device_output[index + starter - 1]; } __global__ void buildDownTree(int* device_output, int len, int starter, int interval) { int index = (blockIdx.x * blockDim.x + threadIdx.x) * interval; // return if out of range if (index + interval > len) { return; } int temp = device_output[index + starter - 1]; device_output[index + starter - 1] = device_output[index + interval - 1]; device_output[index + interval - 1] += temp; } void launch_scan(int roundedLen, int* device_output, int threadsPerBlock) { // build up tree for (int starter = 1; starter < roundedLen; starter *= 2) { int interval = starter * 2; // plus (threadsPerBlock - 1) is actually ceiling the block number int numBlocks = (roundedLen / interval + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( buildUpTree), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, device_output, roundedLen, starter, interval); } // set last element to zero int zero = 0; hipMemcpy(&device_output[roundedLen - 1], &zero, sizeof(int), hipMemcpyHostToDevice); // build down tree for (int starter = roundedLen / 2; starter >= 1; starter /= 2) { int interval = starter * 2; // plus (threadsPerBlock - 1) is actually ceiling the block number int numBlocks = (roundedLen / interval + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( buildDownTree), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, device_output, roundedLen, starter, interval); } } void exclusive_scan_parallel(int* nums, int len, int* output, double& time_cost) { int* device_output; int roundedLen = roundPowerTwo(len); // Allocate space on GPU and copy inputs into it hipMalloc((void **)&device_output, roundedLen * sizeof(int)); hipMemcpy(device_output, nums, len * sizeof(int), hipMemcpyHostToDevice); // Start to do GPU computing reset_and_start_timer(); // Since in-place algorithm is used, we did not allocate device_input launch_scan(roundedLen, device_output, 256); // Wait for all instances to finished hipDeviceSynchronize(); // Finished GPU computing time_cost = get_elapsed_mcycles(); // Copy back the result hipMemcpy(output, device_output, len * sizeof(int), hipMemcpyDeviceToHost); // Free device memory hipFree(device_output); } __global__ void setRepeat(int* device_input, int* device_outputBTemp, int* device_outputCTemp, int len) { int index = blockIdx.x * blockDim.x + threadIdx.x; // return if out of range if (index + 1 >= len) { return; } // set repeated bits if (device_input[index] == device_input[index + 1]) { device_outputBTemp[index] = 1; device_outputCTemp[index] = 0; } else { device_outputBTemp[index] = 0; device_outputCTemp[index] = 1; } } __global__ void getRepeat(int* nums, int* device_outputB, int* device_outputC, int* device_outputBTemp, int* device_outputCTemp, int len) { int index = blockIdx.x * blockDim.x + threadIdx.x; // return if out of range if (index + 1 >= len) { // check last bit if (index + 1 == len) { int curC = device_outputCTemp[index]; device_outputC[curC] = nums[index]; } return; } int curB = 
device_outputBTemp[index]; int curC = device_outputCTemp[index]; if (curB < device_outputBTemp[index + 1]) { device_outputB[curB] = index; } if (curC < device_outputCTemp[index + 1]) { device_outputC[curC] = nums[index]; } } int launch_find(int len, int roundedLen, int* device_input, int* device_outputB, int* device_outputC, int* device_outputBTemp, int* device_outputCTemp, int threadsPerBlock) { // call setRepeat function first int numBlocks = (len + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( setRepeat), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, device_input, device_outputBTemp, device_outputCTemp, len); // call exclusive scan again to add up previous output launch_scan(roundedLen, device_outputBTemp, threadsPerBlock); launch_scan(roundedLen, device_outputCTemp, threadsPerBlock); // get number of repeats int repeat_count; hipMemcpy(&repeat_count, &device_outputBTemp[len - 1], sizeof(int), hipMemcpyDeviceToHost); // call getRepeat function now hipLaunchKernelGGL(( getRepeat), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, device_input, device_outputB, device_outputC, device_outputBTemp, device_outputCTemp, len); return repeat_count; } int find_repeats_parallel(int* nums, int len, int* outputB, int* outputC, double& time_cost) { int* device_input; int* device_outputB; int* device_outputBTemp; int* device_outputC; int* device_outputCTemp; int roundedLen = roundPowerTwo(len); // Allocate space on GPU and copy inputs into it hipMalloc((void **)&device_input, len * sizeof(int)); hipMalloc((void **)&device_outputB, len * sizeof(int)); hipMalloc((void **)&device_outputC, len * sizeof(int)); hipMalloc((void **)&device_outputBTemp, roundedLen * sizeof(int)); hipMalloc((void **)&device_outputCTemp, roundedLen * sizeof(int)); hipMemcpy(device_input, nums, len * sizeof(int), hipMemcpyHostToDevice); // Start to do GPU computing reset_and_start_timer(); int repeat_count = launch_find(len, roundedLen, device_input, device_outputB, device_outputC, device_outputBTemp, device_outputCTemp, 256); // Wait for all instances to finished hipDeviceSynchronize(); // Finished GPU computing time_cost = get_elapsed_mcycles(); // Copy back the result hipMemcpy(outputB, device_outputB, repeat_count * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(outputC, device_outputC, (len - repeat_count) * sizeof(int), hipMemcpyDeviceToHost); // Free device memory hipFree(device_input); hipFree(device_outputB); hipFree(device_outputC); hipFree(device_outputBTemp); hipFree(device_outputCTemp); return repeat_count; }
dd148014f32fd84d08189af93a5a153cb3d5bb59.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #include "timing.h" #include "util.h" // this method is cited from CMU's assignment description // http://15418.courses.cs.cmu.edu/spring2015/article/4 __global__ void buildUpTree(int* device_output, int len, int starter, int interval) { int index = (blockIdx.x * blockDim.x + threadIdx.x) * interval; // return if out of range if (index + interval > len) { return; } device_output[index + interval - 1] += device_output[index + starter - 1]; } __global__ void buildDownTree(int* device_output, int len, int starter, int interval) { int index = (blockIdx.x * blockDim.x + threadIdx.x) * interval; // return if out of range if (index + interval > len) { return; } int temp = device_output[index + starter - 1]; device_output[index + starter - 1] = device_output[index + interval - 1]; device_output[index + interval - 1] += temp; } void launch_scan(int roundedLen, int* device_output, int threadsPerBlock) { // build up tree for (int starter = 1; starter < roundedLen; starter *= 2) { int interval = starter * 2; // plus (threadsPerBlock - 1) is actually ceiling the block number int numBlocks = (roundedLen / interval + threadsPerBlock - 1) / threadsPerBlock; buildUpTree<<<numBlocks, threadsPerBlock>>>(device_output, roundedLen, starter, interval); } // set last element to zero int zero = 0; cudaMemcpy(&device_output[roundedLen - 1], &zero, sizeof(int), cudaMemcpyHostToDevice); // build down tree for (int starter = roundedLen / 2; starter >= 1; starter /= 2) { int interval = starter * 2; // plus (threadsPerBlock - 1) is actually ceiling the block number int numBlocks = (roundedLen / interval + threadsPerBlock - 1) / threadsPerBlock; buildDownTree<<<numBlocks, threadsPerBlock>>>(device_output, roundedLen, starter, interval); } } void exclusive_scan_parallel(int* nums, int len, int* output, double& time_cost) { int* device_output; int roundedLen = roundPowerTwo(len); // Allocate space on GPU and copy inputs into it cudaMalloc((void **)&device_output, roundedLen * sizeof(int)); cudaMemcpy(device_output, nums, len * sizeof(int), cudaMemcpyHostToDevice); // Start to do GPU computing reset_and_start_timer(); // Since in-place algorithm is used, we did not allocate device_input launch_scan(roundedLen, device_output, 256); // Wait for all instances to finished cudaThreadSynchronize(); // Finished GPU computing time_cost = get_elapsed_mcycles(); // Copy back the result cudaMemcpy(output, device_output, len * sizeof(int), cudaMemcpyDeviceToHost); // Free device memory cudaFree(device_output); } __global__ void setRepeat(int* device_input, int* device_outputBTemp, int* device_outputCTemp, int len) { int index = blockIdx.x * blockDim.x + threadIdx.x; // return if out of range if (index + 1 >= len) { return; } // set repeated bits if (device_input[index] == device_input[index + 1]) { device_outputBTemp[index] = 1; device_outputCTemp[index] = 0; } else { device_outputBTemp[index] = 0; device_outputCTemp[index] = 1; } } __global__ void getRepeat(int* nums, int* device_outputB, int* device_outputC, int* device_outputBTemp, int* device_outputCTemp, int len) { int index = blockIdx.x * blockDim.x + threadIdx.x; // return if out of range if (index + 1 >= len) { // check last bit if (index + 1 == len) { int curC = device_outputCTemp[index]; device_outputC[curC] = nums[index]; } return; } int curB = device_outputBTemp[index]; int curC = device_outputCTemp[index]; if (curB < device_outputBTemp[index + 1]) { device_outputB[curB] = index; } 
if (curC < device_outputCTemp[index + 1]) { device_outputC[curC] = nums[index]; } } int launch_find(int len, int roundedLen, int* device_input, int* device_outputB, int* device_outputC, int* device_outputBTemp, int* device_outputCTemp, int threadsPerBlock) { // call setRepeat function first int numBlocks = (len + threadsPerBlock - 1) / threadsPerBlock; setRepeat<<<numBlocks, threadsPerBlock>>>(device_input, device_outputBTemp, device_outputCTemp, len); // call exclusive scan again to add up previous output launch_scan(roundedLen, device_outputBTemp, threadsPerBlock); launch_scan(roundedLen, device_outputCTemp, threadsPerBlock); // get number of repeats int repeat_count; cudaMemcpy(&repeat_count, &device_outputBTemp[len - 1], sizeof(int), cudaMemcpyDeviceToHost); // call getRepeat function now getRepeat<<<numBlocks, threadsPerBlock>>>(device_input, device_outputB, device_outputC, device_outputBTemp, device_outputCTemp, len); return repeat_count; } int find_repeats_parallel(int* nums, int len, int* outputB, int* outputC, double& time_cost) { int* device_input; int* device_outputB; int* device_outputBTemp; int* device_outputC; int* device_outputCTemp; int roundedLen = roundPowerTwo(len); // Allocate space on GPU and copy inputs into it cudaMalloc((void **)&device_input, len * sizeof(int)); cudaMalloc((void **)&device_outputB, len * sizeof(int)); cudaMalloc((void **)&device_outputC, len * sizeof(int)); cudaMalloc((void **)&device_outputBTemp, roundedLen * sizeof(int)); cudaMalloc((void **)&device_outputCTemp, roundedLen * sizeof(int)); cudaMemcpy(device_input, nums, len * sizeof(int), cudaMemcpyHostToDevice); // Start to do GPU computing reset_and_start_timer(); int repeat_count = launch_find(len, roundedLen, device_input, device_outputB, device_outputC, device_outputBTemp, device_outputCTemp, 256); // Wait for all instances to finished cudaThreadSynchronize(); // Finished GPU computing time_cost = get_elapsed_mcycles(); // Copy back the result cudaMemcpy(outputB, device_outputB, repeat_count * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(outputC, device_outputC, (len - repeat_count) * sizeof(int), cudaMemcpyDeviceToHost); // Free device memory cudaFree(device_input); cudaFree(device_outputB); cudaFree(device_outputC); cudaFree(device_outputBTemp); cudaFree(device_outputCTemp); return repeat_count; }
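Both the Blelloch scan and the find_repeats kernels above can be checked against a straightforward serial reference. A minimal sketch, assuming the same output convention as the kernels (B receives the indices i with nums[i] == nums[i+1], C receives nums with consecutive duplicates collapsed, keeping the last element of each run); the serial function names are illustrative only:

// Serial references for validating the GPU results.
static int exclusive_scan_serial(const int *in, int len, int *out) {
    int sum = 0;
    for (int i = 0; i < len; i++) { out[i] = sum; sum += in[i]; }
    return sum;                                            // total, handy for checks
}

static int find_repeats_serial(const int *nums, int len, int *outB, int *outC) {
    int nb = 0, nc = 0;
    for (int i = 0; i + 1 < len; i++) {
        if (nums[i] == nums[i + 1]) outB[nb++] = i;        // repeated position
        else                        outC[nc++] = nums[i];  // end of a run
    }
    if (len > 0) outC[nc++] = nums[len - 1];               // last element is always kept
    return nb;                                             // number of repeats
}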
ab2977890d06d651cd555521a04bf113c0ed6a6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "GPUfilter.h" //#include "FilterBlock.h" void Convolver::initialize(std::vector< std::vector<float> >& filters) { c_log_msg(LOG_INFO, "Convolver::initialize"); this->cleanup(); // Destroy past filters int pad = this->buffer_len_-1; int f_len = this->filter_len_; int number_of_filters = this->num_outputs_*this->num_inputs_; c_log_msg(LOG_INFO, "Convolver::initialize - filter len %d, pad %d", f_len, pad); int d_h_size = number_of_filters*(f_len+2*pad); // filter with padding int d_dl_size = this->num_outputs_*this->dl_len_; // output delay line int d_y_size = this->num_outputs_*this->buffer_len_; // output buffer int d_x_size = this->num_inputs_*this->buffer_len_; // input buffer float* d_h = valueToDevice<float>(d_h_size, 0.f, 0); float* d_dl = valueToDevice<float>(d_dl_size, 0.f, 0); float* d_y = valueToDevice<float>(d_y_size, 0.f, 0); float* d_x = valueToDevice<float>(d_x_size, 0.f, 0); c_log_msg(LOG_INFO, "Convolver::initialize - number of filters %d", number_of_filters); // Put filter to device, add the padding for(int i = 0; i < number_of_filters; i++) { float* h_filter_ptr = &(filters.at(i)[0]); float* d_filter_ptr = &(d_h[pad+i*(f_len+2*pad)]); copyHostToDevice<float>(f_len, d_filter_ptr, h_filter_ptr, 0); } this->d_filters_ = d_h; this->d_output_dl_ = d_dl; this->d_current_out_frame_ = d_y; this->d_current_in_frame_ = d_x; c_log_msg(LOG_INFO, "Convolver::initialize - done"); } void Convolver::passBuffersThrough(float* input_buffers, float* output_buffers) { int b_l = this->num_inputs_*this->buffer_len_; float* d_in= this->d_current_in_frame_; float* d_out = this->d_current_out_frame_; float* d_dl = this->d_output_dl_; copyHostToDevice<float>(b_l, d_in, input_buffers, 0); dim3 block(512); dim3 grid((int)(this->buffer_len_/block.x)+1); hipLaunchKernelGGL(( passThroughKernel), dim3(grid), dim3(block), 0, 0, d_in, d_out, d_dl, this->num_inputs_, this->buffer_len_, this->dl_len_, this->dl_loc_); copyDeviceToHost<float>(b_l, output_buffers, d_out, 0); this->dl_loc_ = MOD(this->dl_loc_+=this->buffer_len_, this->dl_len_); } void Convolver::convolveT(float* input_buffers, float* output_buffers) { // Size of the frame int b_l = this->num_inputs_*this->buffer_len_; // Filter len, fitlers are padded int f_l = this->filter_len_+2*this->pad_; float* d_in= this->d_current_in_frame_; float* d_out = this->d_current_out_frame_; float* d_dl = this->d_output_dl_; float* d_h = this->d_filters_; // Copy current frame to device copyHostToDevice<float>(b_l, d_in, input_buffers, 0); float* debug = (float*)NULL; //float* debug = valueToDevice<float>(this->conv_len_, 0.f, 0); dim3 block(256); dim3 grid((int)(this->conv_len_/block.x)+1); c_log_msg(LOG_DEBUG, "Launch, block x %u, y %u, z %u, grid x %d, y %d, z %d", block.x, block.y, block.z, grid.x, grid.y, grid.z); hipLaunchKernelGGL(( convolveBuffers<256, 2>), dim3(grid), dim3(block), 0, 0, d_in, d_out, d_dl, d_h+this->pad_, this->num_inputs_, this->buffer_len_, f_l, this->dl_len_, this->dl_loc_, this->conv_len_, debug); //copyDeviceToHost<float>(b_l, output_buffers, d_out, 0); //float* h_debug = fromDevice<float>(this->conv_len_, debug, 0); //for(int i = 0; i < this->conv_len_; i++) // printf("Debug vec %d: %f \n", i, h_debug[i]); this->dl_loc_ = MOD(this->dl_loc_+=this->buffer_len_, this->dl_len_); //destroyMem<float>(debug); //printf("Dl loc %d\n", this->dl_loc_); } // The lenght of y must be len_x+len_h-1, otherwise apeshit void convolutionCPU(const float* 
h_x, const float* h_h, float* h_y, int len_x, int len_h) { int conv_len = len_x+len_h-1; for(int global_idx = 0; global_idx < conv_len; global_idx++) { float sample = 0.f; if(global_idx < len_x) { for(int i = 0; i < global_idx; i++) { sample += h_x[i]*h_h[global_idx-i]; } } // middle else if(global_idx < len_h){ for(int i = 0; i < len_x; i++) { sample += h_x[i]*h_h[global_idx-i]; } } // end else { int dist = conv_len-global_idx; for(int i = 0; i < dist; i++) { sample += h_x[len_x-dist+i]*h_h[len_h-1-i]; } } h_y[global_idx] = sample; } return; } void convolutionCPUfftw(fftw_plan& fft, fftw_plan& ifft, int len_x, int len_h) { } void convolutionGPU(const float* d_x, const float* d_h, float* d_y, int len_x, int len_h) { int conv_len = len_x+len_h-1; dim3 block(512); dim3 grid((int)(conv_len/block.x)+1); //printf("Convlen %d, filter len %d , buffer len %d " // "Block size %d, Grid size %d \n", // conv_len, len_h, len_x, block.x, grid.x); hipLaunchKernelGGL(( convolve), dim3(grid), dim3(block), 0, 0, d_x, d_h, d_y, len_x, len_h, conv_len); } void convolutionGPUpadH(const float* d_x, const float* d_h, float* d_y, int len_x, int len_h) { int pad = len_x-1; int conv_len = len_x+len_h-1; dim3 block(512); dim3 grid((int)(conv_len/block.x)+1); //printf("PAD Convlen %d, filter len %d , buffer len %d " // "Block size %d, Grid size %d \n", // conv_len, len_h, len_x, block.x, grid.x); hipLaunchKernelGGL(( convolvePadH), dim3(grid), dim3(block), 0, 0, d_x, d_h+pad, d_y, len_x, len_h, conv_len); } void convolutionGPUshared(const float* d_x, const float* d_h, float* d_y, int len_x, int len_h) { int pad = len_x-1; int conv_len = len_x+len_h-1; dim3 block(BUFFER_LEN); dim3 grid((int)(conv_len/block.x)+1); //printf("PAD Convlen %d, filter len %d , buffer len %d " // "Block size %d, Grid size %d \n", // conv_len, len_h, len_x, block.x, grid.x); hipLaunchKernelGGL(( convolvePadHShared<BUFFER_LEN>), dim3(grid), dim3(block), 0, 0, d_x, d_h+pad, d_y, len_x, len_h, conv_len); } int launchConvolutionLinear(FilterBlock* filter_block) { // Note to self: float* ptr = thrust::raw_pointer_cast(&(thrust_vect[0])) return 0; } // This function takes the input buffer, updates the convolution buffer // int launch(float* h_input_buffers, float* h_output_buffers, float* d_filters, float* d_convolution_buffer, const int num_inputs, const int num_outputs, const int conv_buffer_len, const int buffer_len, const int filter_len) { dim3 block(512); dim3 grid((int)(conv_buffer_len)/block.x); return 0; } void multiplyAddBuffers(const hipfftComplex* fdl, const hipfftComplex* H, hipfftComplex* accum, int fdl_idx, int H_idx, int window_len) { int fdl_mem_pos = fdl_idx*window_len; int H_mem_pos = H_idx*window_len; dim3 block(BUFFER_LEN); dim3 grid(window_len/BUFFER_LEN+1); hipLaunchKernelGGL(( multiplyAndAccumulate), dim3(grid), dim3(block), 0, 0, fdl+fdl_mem_pos, H+H_mem_pos, accum, window_len); } ///////////////////////////// // Kernels ////// __device__ void cudMultiplyAdd(const hipfftComplex* a, const hipfftComplex* b, hipfftComplex* c) { (*c).x += (*a).x*(*b).x-(*a).y*(*b).y; (*c).y += (*a).x*(*b).y+(*a).y*(*b).x; } __global__ void multiplyAndAccumulate(const hipfftComplex* x1, const hipfftComplex* x2, hipfftComplex* result, int len) { int global_idx = blockIdx.x*blockDim.x+threadIdx.x; if(global_idx < len) cudMultiplyAdd(&(x1[global_idx]), &(x2[global_idx]), &(result[global_idx])); } // This function is launched so that h is always > x __global__ void convolve(const float* d_x, const float* d_h, float* d_ret, int len_x, int len_h, int 
len) { int global_idx = blockIdx.x*blockDim.x +threadIdx.x; float sample = 0.f; float x = 0.f; float h = 0.f; // begin if(global_idx<len){ if(global_idx < len_x) { for(int i = 0; i < global_idx; i++) { x = d_x[i]; h = d_h[global_idx-i]; sample += x*h; } } // middle else if(global_idx < len_h){ for(int i = 0; i < len_x; i++) { x = d_x[i]; h = d_h[global_idx-i]; sample += x*h; } } // end else { int dist = len-global_idx; for(int i = 0; i < dist; i++) { x = d_x[len_x-dist+i]; h = d_h[len_h-1-i]; sample += x*h; } } d_ret[global_idx] = sample; } } // This function is launched so that h is always > x __global__ void convolvePadH(const float* d_x, const float* d_h, float* d_ret, int len_x, int len_h, int len) { int global_idx = blockIdx.x*blockDim.x+threadIdx.x; float sample = 0.f; float x = 0.f; float h = 0.f; if(global_idx<len){ // H padded, just run it //#pragma unroll for(int i = 0; i < len_x; i++) { x = d_x[i]; h = d_h[global_idx-i]; sample += x*h; } } d_ret[global_idx] = sample; } template < int BUFFER_SIZE > __global__ void convolvePadHShared(const float* d_x, const float* d_h, float* d_ret, int len_x, int len_h, int len) { /// Shared memory allocation __shared__ int block_begin; block_begin = blockIdx.x*blockDim.x; int thread_double = threadIdx.x+BUFFER_SIZE; int global_idx = block_begin+threadIdx.x; __shared__ float s_h[2*BUFFER_SIZE]; __shared__ float s_x[BUFFER_SIZE]; s_h[threadIdx.x] = d_h[global_idx-BUFFER_SIZE+1]; s_h[thread_double] = d_h[block_begin+thread_double-BUFFER_SIZE+1]; s_x[threadIdx.x] = d_x[threadIdx.x]; __syncthreads(); /// End shared memory float sample = 0.f; if(global_idx<len){ for(int i = 0; i < len_x; i++) { sample+=s_x[i]*s_h[BUFFER_SIZE-1+threadIdx.x-i]; } } d_ret[global_idx] = sample; } __global__ void passThroughKernel(float* d_input, float* d_output, float* d_dl, int num_channels, int buffer_len, int dl_len, int dl_loc) { int global_idx = blockIdx.x*blockDim.x+threadIdx.x; int dl_idx = MOD(dl_loc+global_idx, dl_len); if(global_idx < buffer_len) { #pragma unroll for(int i = 0; i < num_channels; i++) { d_dl[dl_idx+i*dl_len] = d_input[global_idx+i*buffer_len]; } #pragma unroll for(int j = 0; j < num_channels; j++) { d_output[global_idx+j*buffer_len] = d_dl[dl_idx+j*dl_len]; } } } __global__ void appendConvolutionBuffer(float* d_convolution_buffer, float* buffer_len){} template <int BUFFER_SIZE, int NUM_CHANNELS> __global__ void convolveBuffers(float* d_input, float* d_output, float* d_dl, float* d_h, int num_channels, int buffer_len, int filter_len, int dl_len, int dl_loc, int conv_len, float* debug) { /// Shared memory allocation __shared__ int block_begin; block_begin = blockIdx.x*blockDim.x; int global_idx = block_begin+threadIdx.x; int thread_double = threadIdx.x+BUFFER_SIZE; __shared__ float s_h[NUM_CHANNELS][2*BUFFER_SIZE]; __shared__ float s_x[NUM_CHANNELS][BUFFER_SIZE]; for(int i = 0; i < NUM_CHANNELS; i++) { int f_idx = i*filter_len-BUFFER_SIZE+1; s_h[i][threadIdx.x] = d_h[global_idx+f_idx]; s_h[i][thread_double] = d_h[block_begin+thread_double+f_idx]; s_x[i][threadIdx.x] = d_input[threadIdx.x+i*buffer_len]; } __syncthreads(); /// End shared memory allocation // Calculate delayline indices int dl_idx_past = MOD(dl_loc+global_idx-buffer_len, dl_len); int dl_idx = MOD(dl_loc+global_idx, dl_len); //////////////// // Go through the length of the convolution // // Calculate the y values via convolution of x and h // of the current channel // // Grab the tail of the past convolution from the // delay line // // Save values to the delay line // 
if(global_idx<conv_len) { for(int j = 0; j < NUM_CHANNELS; j++) { float sample = 0.f; float dl_val = d_dl[dl_idx+dl_len*j]; for(int i = 0; i < buffer_len; i++) { sample+= s_x[j][i]*s_h[j][thread_double-i-1]; } // Add the sample to the delay line sample sample += dl_val; // Insert the convolved sample to the delay line d_dl[dl_idx+dl_len*j] = sample; //if(j == 0) // debug[global_idx] = sample; // Insert the sample to the output buffer if(global_idx < buffer_len){ // Erase past buffer in the delay line d_dl[dl_idx_past+dl_len*j] = 0.f; //sample += s_x[j][global_idx]; //d_dl[dl_idx+dl_len*j] = sample; d_output[global_idx+j*buffer_len] = sample; } } } }
ab2977890d06d651cd555521a04bf113c0ed6a6e.cu
#include "GPUfilter.h" //#include "FilterBlock.h" void Convolver::initialize(std::vector< std::vector<float> >& filters) { c_log_msg(LOG_INFO, "Convolver::initialize"); this->cleanup(); // Destroy past filters int pad = this->buffer_len_-1; int f_len = this->filter_len_; int number_of_filters = this->num_outputs_*this->num_inputs_; c_log_msg(LOG_INFO, "Convolver::initialize - filter len %d, pad %d", f_len, pad); int d_h_size = number_of_filters*(f_len+2*pad); // filter with padding int d_dl_size = this->num_outputs_*this->dl_len_; // output delay line int d_y_size = this->num_outputs_*this->buffer_len_; // output buffer int d_x_size = this->num_inputs_*this->buffer_len_; // input buffer float* d_h = valueToDevice<float>(d_h_size, 0.f, 0); float* d_dl = valueToDevice<float>(d_dl_size, 0.f, 0); float* d_y = valueToDevice<float>(d_y_size, 0.f, 0); float* d_x = valueToDevice<float>(d_x_size, 0.f, 0); c_log_msg(LOG_INFO, "Convolver::initialize - number of filters %d", number_of_filters); // Put filter to device, add the padding for(int i = 0; i < number_of_filters; i++) { float* h_filter_ptr = &(filters.at(i)[0]); float* d_filter_ptr = &(d_h[pad+i*(f_len+2*pad)]); copyHostToDevice<float>(f_len, d_filter_ptr, h_filter_ptr, 0); } this->d_filters_ = d_h; this->d_output_dl_ = d_dl; this->d_current_out_frame_ = d_y; this->d_current_in_frame_ = d_x; c_log_msg(LOG_INFO, "Convolver::initialize - done"); } void Convolver::passBuffersThrough(float* input_buffers, float* output_buffers) { int b_l = this->num_inputs_*this->buffer_len_; float* d_in= this->d_current_in_frame_; float* d_out = this->d_current_out_frame_; float* d_dl = this->d_output_dl_; copyHostToDevice<float>(b_l, d_in, input_buffers, 0); dim3 block(512); dim3 grid((int)(this->buffer_len_/block.x)+1); passThroughKernel<<<grid, block>>>(d_in, d_out, d_dl, this->num_inputs_, this->buffer_len_, this->dl_len_, this->dl_loc_); copyDeviceToHost<float>(b_l, output_buffers, d_out, 0); this->dl_loc_ = MOD(this->dl_loc_+=this->buffer_len_, this->dl_len_); } void Convolver::convolveT(float* input_buffers, float* output_buffers) { // Size of the frame int b_l = this->num_inputs_*this->buffer_len_; // Filter len, fitlers are padded int f_l = this->filter_len_+2*this->pad_; float* d_in= this->d_current_in_frame_; float* d_out = this->d_current_out_frame_; float* d_dl = this->d_output_dl_; float* d_h = this->d_filters_; // Copy current frame to device copyHostToDevice<float>(b_l, d_in, input_buffers, 0); float* debug = (float*)NULL; //float* debug = valueToDevice<float>(this->conv_len_, 0.f, 0); dim3 block(256); dim3 grid((int)(this->conv_len_/block.x)+1); c_log_msg(LOG_DEBUG, "Launch, block x %u, y %u, z %u, grid x %d, y %d, z %d", block.x, block.y, block.z, grid.x, grid.y, grid.z); convolveBuffers<256, 2><<<grid, block>>>(d_in, d_out, d_dl, d_h+this->pad_, this->num_inputs_, this->buffer_len_, f_l, this->dl_len_, this->dl_loc_, this->conv_len_, debug); //copyDeviceToHost<float>(b_l, output_buffers, d_out, 0); //float* h_debug = fromDevice<float>(this->conv_len_, debug, 0); //for(int i = 0; i < this->conv_len_; i++) // printf("Debug vec %d: %f \n", i, h_debug[i]); this->dl_loc_ = MOD(this->dl_loc_+=this->buffer_len_, this->dl_len_); //destroyMem<float>(debug); //printf("Dl loc %d\n", this->dl_loc_); } // The lenght of y must be len_x+len_h-1, otherwise apeshit void convolutionCPU(const float* h_x, const float* h_h, float* h_y, int len_x, int len_h) { int conv_len = len_x+len_h-1; for(int global_idx = 0; global_idx < conv_len; global_idx++) { float sample 
= 0.f; if(global_idx < len_x) { for(int i = 0; i < global_idx; i++) { sample += h_x[i]*h_h[global_idx-i]; } } // middle else if(global_idx < len_h){ for(int i = 0; i < len_x; i++) { sample += h_x[i]*h_h[global_idx-i]; } } // end else { int dist = conv_len-global_idx; for(int i = 0; i < dist; i++) { sample += h_x[len_x-dist+i]*h_h[len_h-1-i]; } } h_y[global_idx] = sample; } return; } void convolutionCPUfftw(fftw_plan& fft, fftw_plan& ifft, int len_x, int len_h) { } void convolutionGPU(const float* d_x, const float* d_h, float* d_y, int len_x, int len_h) { int conv_len = len_x+len_h-1; dim3 block(512); dim3 grid((int)(conv_len/block.x)+1); //printf("Convlen %d, filter len %d , buffer len %d " // "Block size %d, Grid size %d \n", // conv_len, len_h, len_x, block.x, grid.x); convolve<<<grid, block>>>(d_x, d_h, d_y, len_x, len_h, conv_len); } void convolutionGPUpadH(const float* d_x, const float* d_h, float* d_y, int len_x, int len_h) { int pad = len_x-1; int conv_len = len_x+len_h-1; dim3 block(512); dim3 grid((int)(conv_len/block.x)+1); //printf("PAD Convlen %d, filter len %d , buffer len %d " // "Block size %d, Grid size %d \n", // conv_len, len_h, len_x, block.x, grid.x); convolvePadH<<<grid, block>>>(d_x, d_h+pad, d_y, len_x, len_h, conv_len); } void convolutionGPUshared(const float* d_x, const float* d_h, float* d_y, int len_x, int len_h) { int pad = len_x-1; int conv_len = len_x+len_h-1; dim3 block(BUFFER_LEN); dim3 grid((int)(conv_len/block.x)+1); //printf("PAD Convlen %d, filter len %d , buffer len %d " // "Block size %d, Grid size %d \n", // conv_len, len_h, len_x, block.x, grid.x); convolvePadHShared<BUFFER_LEN><<<grid, block>>>(d_x, d_h+pad, d_y, len_x, len_h, conv_len); } int launchConvolutionLinear(FilterBlock* filter_block) { // Note to self: float* ptr = thrust::raw_pointer_cast(&(thrust_vect[0])) return 0; } // This function takes the input buffer, updates the convolution buffer // int launch(float* h_input_buffers, float* h_output_buffers, float* d_filters, float* d_convolution_buffer, const int num_inputs, const int num_outputs, const int conv_buffer_len, const int buffer_len, const int filter_len) { dim3 block(512); dim3 grid((int)(conv_buffer_len)/block.x); return 0; } void multiplyAddBuffers(const cufftComplex* fdl, const cufftComplex* H, cufftComplex* accum, int fdl_idx, int H_idx, int window_len) { int fdl_mem_pos = fdl_idx*window_len; int H_mem_pos = H_idx*window_len; dim3 block(BUFFER_LEN); dim3 grid(window_len/BUFFER_LEN+1); multiplyAndAccumulate<<<grid, block>>>(fdl+fdl_mem_pos, H+H_mem_pos, accum, window_len); } ///////////////////////////// // Kernels ////// __device__ void cudMultiplyAdd(const cufftComplex* a, const cufftComplex* b, cufftComplex* c) { (*c).x += (*a).x*(*b).x-(*a).y*(*b).y; (*c).y += (*a).x*(*b).y+(*a).y*(*b).x; } __global__ void multiplyAndAccumulate(const cufftComplex* x1, const cufftComplex* x2, cufftComplex* result, int len) { int global_idx = blockIdx.x*blockDim.x+threadIdx.x; if(global_idx < len) cudMultiplyAdd(&(x1[global_idx]), &(x2[global_idx]), &(result[global_idx])); } // This function is launched so that h is always > x __global__ void convolve(const float* d_x, const float* d_h, float* d_ret, int len_x, int len_h, int len) { int global_idx = blockIdx.x*blockDim.x +threadIdx.x; float sample = 0.f; float x = 0.f; float h = 0.f; // begin if(global_idx<len){ if(global_idx < len_x) { for(int i = 0; i < global_idx; i++) { x = d_x[i]; h = d_h[global_idx-i]; sample += x*h; } } // middle else if(global_idx < len_h){ for(int i = 0; i < len_x; 
i++) { x = d_x[i]; h = d_h[global_idx-i]; sample += x*h; } } // end else { int dist = len-global_idx; for(int i = 0; i < dist; i++) { x = d_x[len_x-dist+i]; h = d_h[len_h-1-i]; sample += x*h; } } d_ret[global_idx] = sample; } } // This function is launched so that h is always > x __global__ void convolvePadH(const float* d_x, const float* d_h, float* d_ret, int len_x, int len_h, int len) { int global_idx = blockIdx.x*blockDim.x+threadIdx.x; float sample = 0.f; float x = 0.f; float h = 0.f; if(global_idx<len){ // H padded, just run it //#pragma unroll for(int i = 0; i < len_x; i++) { x = d_x[i]; h = d_h[global_idx-i]; sample += x*h; } } d_ret[global_idx] = sample; } template < int BUFFER_SIZE > __global__ void convolvePadHShared(const float* d_x, const float* d_h, float* d_ret, int len_x, int len_h, int len) { /// Shared memory allocation __shared__ int block_begin; block_begin = blockIdx.x*blockDim.x; int thread_double = threadIdx.x+BUFFER_SIZE; int global_idx = block_begin+threadIdx.x; __shared__ float s_h[2*BUFFER_SIZE]; __shared__ float s_x[BUFFER_SIZE]; s_h[threadIdx.x] = d_h[global_idx-BUFFER_SIZE+1]; s_h[thread_double] = d_h[block_begin+thread_double-BUFFER_SIZE+1]; s_x[threadIdx.x] = d_x[threadIdx.x]; __syncthreads(); /// End shared memory float sample = 0.f; if(global_idx<len){ for(int i = 0; i < len_x; i++) { sample+=s_x[i]*s_h[BUFFER_SIZE-1+threadIdx.x-i]; } } d_ret[global_idx] = sample; } __global__ void passThroughKernel(float* d_input, float* d_output, float* d_dl, int num_channels, int buffer_len, int dl_len, int dl_loc) { int global_idx = blockIdx.x*blockDim.x+threadIdx.x; int dl_idx = MOD(dl_loc+global_idx, dl_len); if(global_idx < buffer_len) { #pragma unroll for(int i = 0; i < num_channels; i++) { d_dl[dl_idx+i*dl_len] = d_input[global_idx+i*buffer_len]; } #pragma unroll for(int j = 0; j < num_channels; j++) { d_output[global_idx+j*buffer_len] = d_dl[dl_idx+j*dl_len]; } } } __global__ void appendConvolutionBuffer(float* d_convolution_buffer, float* buffer_len){} template <int BUFFER_SIZE, int NUM_CHANNELS> __global__ void convolveBuffers(float* d_input, float* d_output, float* d_dl, float* d_h, int num_channels, int buffer_len, int filter_len, int dl_len, int dl_loc, int conv_len, float* debug) { /// Shared memory allocation __shared__ int block_begin; block_begin = blockIdx.x*blockDim.x; int global_idx = block_begin+threadIdx.x; int thread_double = threadIdx.x+BUFFER_SIZE; __shared__ float s_h[NUM_CHANNELS][2*BUFFER_SIZE]; __shared__ float s_x[NUM_CHANNELS][BUFFER_SIZE]; for(int i = 0; i < NUM_CHANNELS; i++) { int f_idx = i*filter_len-BUFFER_SIZE+1; s_h[i][threadIdx.x] = d_h[global_idx+f_idx]; s_h[i][thread_double] = d_h[block_begin+thread_double+f_idx]; s_x[i][threadIdx.x] = d_input[threadIdx.x+i*buffer_len]; } __syncthreads(); /// End shared memory allocation // Calculate delayline indices int dl_idx_past = MOD(dl_loc+global_idx-buffer_len, dl_len); int dl_idx = MOD(dl_loc+global_idx, dl_len); //////////////// // Go through the length of the convolution // // Calculate the y values via convolution of x and h // of the current channel // // Grab the tail of the past convolution from the // delay line // // Save values to the delay line // if(global_idx<conv_len) { for(int j = 0; j < NUM_CHANNELS; j++) { float sample = 0.f; float dl_val = d_dl[dl_idx+dl_len*j]; for(int i = 0; i < buffer_len; i++) { sample+= s_x[j][i]*s_h[j][thread_double-i-1]; } // Add the sample to the delay line sample sample += dl_val; // Insert the convolved sample to the delay line 
d_dl[dl_idx+dl_len*j] = sample; //if(j == 0) // debug[global_idx] = sample; // Insert the sample to the output buffer if(global_idx < buffer_len){ // Erase past buffer in the delay line d_dl[dl_idx_past+dl_len*j] = 0.f; //sample += s_x[j][global_idx]; //d_dl[dl_idx+dl_len*j] = sample; d_output[global_idx+j*buffer_len] = sample; } } } }
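The convolveBuffers kernel in the pair above implements block convolution by overlap-add: each thread computes one sample of the current block's linear convolution with the filter tile in shared memory, adds the tail already stored in the circular delay line, writes the sum back, emits only the first buffer_len samples, and recycles the delay-line slot one block behind the write position. A serial sketch of that scheme, with all names and the size relation dl_len >= conv_len + block_len assumed for illustration:

// Overlap-add of one block through a circular delay line (serial sketch).
void overlap_add_block(const float *x, int block_len,
                       const float *h, int h_len,
                       float *dl, int dl_len, int dl_loc,
                       float *y) {
    int conv_len = block_len + h_len - 1;
    for (int n = 0; n < conv_len; n++) {
        float s = 0.f;
        for (int i = 0; i < block_len; i++)
            if (n - i >= 0 && n - i < h_len) s += x[i] * h[n - i];
        int idx = (dl_loc + n) % dl_len;
        dl[idx] += s;                                   // accumulate onto the tail of previous blocks
        if (n < block_len) {
            y[n] = dl[idx];                             // finished samples leave the delay line
            dl[(idx + dl_len - block_len) % dl_len] = 0.f;  // clear the slot just consumed
        }
    }
}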
1607cadda23723d8fe582042a6fdff682f4ab26b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "crate3Dplot.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *plotValues = NULL; hipMalloc(&plotValues, XSIZE*YSIZE); float patchSize = XSIZE*YSIZE; int itemsX = 1; int itemsY = 1; float maxValue = 1; float *vertexData = NULL; hipMalloc(&vertexData, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( crate3Dplot), dim3(gridBlock),dim3(threadBlock), 0, 0, plotValues,patchSize,itemsX,itemsY,maxValue,vertexData); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( crate3Dplot), dim3(gridBlock),dim3(threadBlock), 0, 0, plotValues,patchSize,itemsX,itemsY,maxValue,vertexData); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( crate3Dplot), dim3(gridBlock),dim3(threadBlock), 0, 0, plotValues,patchSize,itemsX,itemsY,maxValue,vertexData); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
1607cadda23723d8fe582042a6fdff682f4ab26b.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "crate3Dplot.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *plotValues = NULL; cudaMalloc(&plotValues, XSIZE*YSIZE); float patchSize = XSIZE*YSIZE; int itemsX = 1; int itemsY = 1; float maxValue = 1; float *vertexData = NULL; cudaMalloc(&vertexData, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); crate3Dplot<<<gridBlock,threadBlock>>>(plotValues,patchSize,itemsX,itemsY,maxValue,vertexData); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { crate3Dplot<<<gridBlock,threadBlock>>>(plotValues,patchSize,itemsX,itemsY,maxValue,vertexData); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { crate3Dplot<<<gridBlock,threadBlock>>>(plotValues,patchSize,itemsX,itemsY,maxValue,vertexData); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
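The timing loop in the pair above records std::chrono::steady_clock around asynchronous kernel launches without synchronizing after the loop, so it largely measures launch overhead rather than kernel runtime. A minimal sketch of device-side timing with CUDA events for the same launch, written against the .cu version above (the event variable names are illustrative):

// Time 1000 launches with CUDA events; the elapsed time covers kernel execution.
cudaEvent_t evStart, evStop;
cudaEventCreate(&evStart);
cudaEventCreate(&evStop);
cudaEventRecord(evStart);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    crate3Dplot<<<gridBlock, threadBlock>>>(plotValues, patchSize, itemsX, itemsY, maxValue, vertexData);
}
cudaEventRecord(evStop);
cudaEventSynchronize(evStop);                      // wait for all launches to finish
float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, evStart, evStop);
cout << "Average kernel time: " << msecTotal / 1000.0f << " ms" << endl;
cudaEventDestroy(evStart);
cudaEventDestroy(evStop);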
0f16e9ce237d6f38940cccc4f3ecfc326f26b7a8.hip
// !!! This is a file automatically generated by hipify!!! // CUDA Histogram Computation // Tim Demetriades // CPE 810 - GPU & Multicore Programming // Professor Feng // Stevens Institute of Technology #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include "helper_cuda.h" //for __syncthreads() and atomicadd #ifndef __HIPCC__ #define __HIPCC__ #endif #include <hip/device_functions.h> #include <stdio.h> #include <cstdlib> #include <time.h> // for CPU timer #include <math.h> // for power function // Statically allocate shared memory #define SHARED_MEM_SIZE 16 // Size of each block // Kernel function for Histogram Computation using interleaved portioning (memory coalescing) __global__ void Histogram_GPU_1(unsigned int* device_input, unsigned int* device_bins, unsigned long input_size, unsigned int bin_size) { // Get thread id unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; for (unsigned int i = tid; i < input_size; i += (blockDim.x * gridDim.x)) { // blockDim.x * gridDim.x = total number of threads for each kernel invocation if (device_input[i] >= 0 && device_input[i] < 1024) { // Boundary condition atomicAdd(&device_bins[device_input[i] / bin_size], 1); } } } // Kernel function for Histogram Computation using shared memory (privatization) __global__ void Histogram_GPU_2(unsigned int* device_input, unsigned int* device_bins, unsigned long input_size, unsigned int bin_size, unsigned int num_bins) { // Get thread id unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // Private bins __device__ __shared__ int device_bins_private[SHARED_MEM_SIZE]; // Initialize private bins to 0 for (unsigned int binIdx = threadIdx.x; binIdx < num_bins; binIdx += blockDim.x) { device_bins_private[binIdx] = 0; } __syncthreads(); // Compute histogram for (unsigned int i = tid; i < input_size; i += blockDim.x * gridDim.x) { if (device_input[i] >= 0 && device_input[i] < 1024) { // Boundary condition atomicAdd(&device_bins_private[device_input[i] / bin_size], 1); } } __syncthreads(); // Move from shared to global memory for (unsigned int binIdx = threadIdx.x; binIdx < num_bins; binIdx += blockDim.x) { atomicAdd(&device_bins[binIdx], device_bins_private[binIdx]); } } // Kernel function for Histogram Computation using shared memory (privatization) and aggregation // Needs some tweaks __global__ void Histogram_GPU_3(unsigned int* device_input, unsigned int* device_bins, unsigned long input_size, unsigned int bin_size, unsigned int num_bins) { // Get thread id unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // Private bins __device__ __shared__ int device_bins_private[SHARED_MEM_SIZE]; // Initialize private bins to 0 for (unsigned int binIdx = threadIdx.x; binIdx < num_bins; binIdx += blockDim.x) { device_bins_private[binIdx] = 0; } __syncthreads(); unsigned int prev_index = -1; // Tracks index of histogram element whose updates have been aggregated (-1 so it won't have chance of matching value in bin) unsigned int accumulator = 0; // Keeps track of number of updates aggregated so far (0 means no updates have been aggregated) // Compute histogram for (unsigned int i = tid; i < input_size; i += blockDim.x * gridDim.x) { if (device_input[i] >= 0 && device_input[i] < 1024) { // Boundary condition unsigned int current_index = device_input[i] / bin_size; if (current_index != prev_index) { // Compare index of histogram element to be updated with index of one currently being aggregated if (accumulator >= 0) { atomicAdd(&device_bins_private[device_input[i] / bin_size], accumulator); 
// Current and previous are different, so add accumulator to value in bin accumulator = 1; prev_index = current_index; } } else { accumulator ++; // Current and previous match so increment accumulator } } } __syncthreads(); // Move from shared to global memory for (unsigned int binIdx = threadIdx.x; binIdx < num_bins; binIdx += blockDim.x) { atomicAdd(&device_bins[binIdx], device_bins_private[binIdx]); } } // Histogram Computation on CPU void Histogram_CPU(unsigned int* host_input, unsigned int input_size, unsigned int bin_size, unsigned int * host_bins) { for (int i = 0; i < input_size; i++) { host_bins[host_input[i] / bin_size]++; } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char* argv[]) { unsigned int num_bins = 0; unsigned long input_size = 0; if (argc == 4) { // 4 arguments expected (filename, -i, <BinNum>, <VecDim>) int bin_exponent = atoi(argv[2]); if (bin_exponent < 2 || bin_exponent > 8) { printf("\nPlease make sure <BinNum> is between 2 and 8.\n"); exit(EXIT_FAILURE); } // Set number of bins num_bins = pow(2, bin_exponent); printf("\nThere will be %d bins \n", num_bins); // Set number of input elements input_size = atoi(argv[3]); printf("Input vector is %d elements long \n", input_size); } else if (argc > 4) { printf("\nToo many arguments provided. \n"); printf("Enter arguments like this: \n"); printf("-i <BinNum> <VecDim> \n"); exit(EXIT_FAILURE); } else { printf("\n3 arguments expected. \n"); printf("Enter arguments like this: \n"); printf("-i <BinNum> <VecDim> \n"); exit(EXIT_FAILURE); } // Set number of elements per bin unsigned int bin_size = 1024 / num_bins; // 1024 is the max possible input element printf("Each bin will contain %u elements\n", bin_size); // Size in bytes of input vector size_t input_bytes = input_size * sizeof(int); // Size in bytes of bins size_t bin_bytes = num_bins * sizeof(int); // Allocate host memory for input vector and bins unsigned int* host_input; unsigned int* host_bins; unsigned int* host_bins_cpu; host_input = (unsigned int*)malloc(input_bytes); host_bins = (unsigned int*)malloc(bin_bytes); host_bins_cpu = (unsigned int*)malloc(bin_bytes); // Allocate device memory for input vector and bins unsigned int* device_input; unsigned int* device_bins; checkCudaErrors(hipMalloc((void**)&device_input, input_bytes)); checkCudaErrors(hipMalloc((void**)&device_bins, bin_bytes)); // Initialize input vector with ints between 0~1024 srand((unsigned int)time(NULL)); // Assigns seed to make random numbers change for (int i = 0; i < input_size; i++) { host_input[i] = rand() % 1024; } // Initialize bins with 0s for (int i = 0; i < num_bins; i++) { host_bins[i] = 0; } for (int i = 0; i < num_bins; i++) { host_bins_cpu[i] = 0; } //// Print input vector //printf("\nInput vector:\n"); //for (int i = 0; i < input_size; i++) { // printf("%d\t", host_input[i]); //} // Copy matrix values from host to device checkCudaErrors(hipMemcpy(device_input, host_input, input_bytes, hipMemcpyHostToDevice)); // dest, source, size in bytes, direction of transfer checkCudaErrors(hipMemcpy(device_bins, host_bins, bin_bytes, hipMemcpyHostToDevice)); // dest, source, size in bytes, direction of transfer // Set Grid and Block sizes int block_size = 16; // Threads per block int grid_size = ceil(input_size / block_size) + 1; // Blocks per grid dim3 dim_block(block_size); dim3 dim_grid(grid_size); // Record the start event (for timing GPU 
calculations) hipStream_t stream; hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); checkCudaErrors(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); checkCudaErrors(hipStreamSynchronize(stream)); checkCudaErrors(hipEventRecord(start, stream)); int nIter = 100; // How many times to run kernel printf("\nStarting Histogram Computation on GPU\n"); // Launch kernel (repeat nIter times so we can obtain average run time) // Leave the kernel you want to use un-commented and comment out the rest for (int i = 0; i < nIter; i++) { hipLaunchKernelGGL(( Histogram_GPU_1), dim3(dim_grid), dim3(dim_block), 0, 0, device_input, device_bins, input_size, bin_size); //Histogram_GPU_2<<<dim_grid, dim_block>>>(device_input, device_bins, input_size, bin_size, num_bins); //Histogram_GPU_3<<<dim_grid, dim_block>>>(device_input, device_bins, input_size, bin_size, num_bins); } printf("\n\GPU Histogram Computation Complete\n"); // Record the stop event checkCudaErrors(hipEventRecord(stop, stream)); // Wait for the stop event to complete checkCudaErrors(hipEventSynchronize(stop)); float msecTotal = 0.0f; checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop)); // Compute and print the performance float msecPerHistogram = msecTotal / nIter; printf("\nGPU Histogram Computation took %.3f msec\n", msecPerHistogram); printf("\nThreads per block = %d, Blocks per grid = %d, Total threads = %d\n", block_size, grid_size, block_size * grid_size); // Copy matrix values from device to host checkCudaErrors(hipMemcpy(host_bins, device_bins, bin_bytes, hipMemcpyDeviceToHost)); // Make sure bins from GPU have correct values for (int i = 0; i < num_bins; i++) { host_bins[i] /= nIter; } //// Print GPU results //printf("\nGPU Results: \n"); //for (int i = 0; i < num_bins; i++) { // printf("\nBins %d = %u", i, host_bins[i]); //} //int sum_bins = 0; //for (int i = 0; i < num_bins; i++) { // sum_bins += host_bins[i]; //} //printf("\n\nSummation of all the bins = %d\n", sum_bins); //Start CPU timer double time_taken_cpu = 0.0; clock_t begin_cpu = clock(); // Calculate histogram on CPU printf("\nStarting Histogram Computation on CPU\n"); for (int i = 0; i < nIter; i++) { // Repeat CPU computation same amount of times as GPU computation Histogram_CPU(host_input, input_size, bin_size, host_bins_cpu); } printf("\nCPU Histogram Computation Complete\n"); clock_t end_cpu = clock(); time_taken_cpu += ((double)(end_cpu - begin_cpu) / CLOCKS_PER_SEC * 1000) / nIter; // in milliseconds printf("\nCPU Histogram Computation took %.3f msec\n", time_taken_cpu); // Make sure bins from CPU have correct values for (int i = 0; i < num_bins; i++) { host_bins_cpu[i] /= nIter; } //// Print CPU results //printf("\nCPU Results: \n"); //for (int i = 0; i < num_bins; i++) { // printf("\nBins %d = %u", i, host_bins_cpu[i]); //} //int sum_bins_cpu = 0; //for (int i = 0; i < num_bins; i++) { // sum_bins_cpu += host_bins_cpu[i]; //} //printf("\n\nSummation of all the bins = %d\n", sum_bins_cpu); // Check if GPU and CPU histograms match bool check = 0; for (int i = 0; i < num_bins; i++) { // For every value in the arrays if (host_bins[i] != host_bins_cpu[i]) { // Check if they match and if not set a flag check = 1; } } if (check == 1) { printf("\nGPU and CPU histograms do not match!\n"); } else { printf("\nGPU and CPU histograms match!\n"); } // Free memory in device hipFree(device_input); hipFree(device_bins); // Free memory in host free(host_input); free(host_bins); }
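Histogram_GPU_3 above is marked as still needing tweaks: when the bin index changes, the pending accumulator is flushed into the current bin rather than the previous one, and the final run is never flushed after the loop. A minimal sketch of the usual per-thread aggregation pattern on top of the same privatized shared-memory bins; the kernel name is illustrative and the signature mirrors the one above:

// Aggregated histogram: count consecutive hits to the same bin in registers and
// flush them with a single atomicAdd when the bin changes (and once after the loop).
__global__ void Histogram_GPU_3_fixed(unsigned int* device_input, unsigned int* device_bins,
                                      unsigned long input_size, unsigned int bin_size,
                                      unsigned int num_bins) {
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ int device_bins_private[SHARED_MEM_SIZE];
    for (unsigned int binIdx = threadIdx.x; binIdx < num_bins; binIdx += blockDim.x)
        device_bins_private[binIdx] = 0;
    __syncthreads();
    int prev_index = -1;          // signed so -1 can never match a real bin index
    int accumulator = 0;          // pending updates for prev_index
    for (unsigned int i = tid; i < input_size; i += blockDim.x * gridDim.x) {
        if (device_input[i] < 1024) {                       // boundary condition
            int current_index = device_input[i] / bin_size;
            if (current_index != prev_index) {
                if (accumulator > 0)
                    atomicAdd(&device_bins_private[prev_index], accumulator);
                accumulator = 1;
                prev_index = current_index;
            } else {
                accumulator++;
            }
        }
    }
    if (accumulator > 0)                                    // flush the last run
        atomicAdd(&device_bins_private[prev_index], accumulator);
    __syncthreads();
    for (unsigned int binIdx = threadIdx.x; binIdx < num_bins; binIdx += blockDim.x)
        atomicAdd(&device_bins[binIdx], device_bins_private[binIdx]);
}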
0f16e9ce237d6f38940cccc4f3ecfc326f26b7a8.cu
// CUDA Histogram Computation // Tim Demetriades // CPE 810 - GPU & Multicore Programming // Professor Feng // Stevens Institute of Technology #include <cuda_runtime.h> #include <device_launch_parameters.h> #include "helper_cuda.h" //for __syncthreads() and atomicadd #ifndef __CUDACC__ #define __CUDACC__ #endif #include <device_functions.h> #include <stdio.h> #include <cstdlib> #include <time.h> // for CPU timer #include <math.h> // for power function // Statically allocate shared memory #define SHARED_MEM_SIZE 16 // Size of each block // Kernel function for Histogram Computation using interleaved portioning (memory coalescing) __global__ void Histogram_GPU_1(unsigned int* device_input, unsigned int* device_bins, unsigned long input_size, unsigned int bin_size) { // Get thread id unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; for (unsigned int i = tid; i < input_size; i += (blockDim.x * gridDim.x)) { // blockDim.x * gridDim.x = total number of threads for each kernel invocation if (device_input[i] >= 0 && device_input[i] < 1024) { // Boundary condition atomicAdd(&device_bins[device_input[i] / bin_size], 1); } } } // Kernel function for Histogram Computation using shared memory (privatization) __global__ void Histogram_GPU_2(unsigned int* device_input, unsigned int* device_bins, unsigned long input_size, unsigned int bin_size, unsigned int num_bins) { // Get thread id unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // Private bins __device__ __shared__ int device_bins_private[SHARED_MEM_SIZE]; // Initialize private bins to 0 for (unsigned int binIdx = threadIdx.x; binIdx < num_bins; binIdx += blockDim.x) { device_bins_private[binIdx] = 0; } __syncthreads(); // Compute histogram for (unsigned int i = tid; i < input_size; i += blockDim.x * gridDim.x) { if (device_input[i] >= 0 && device_input[i] < 1024) { // Boundary condition atomicAdd(&device_bins_private[device_input[i] / bin_size], 1); } } __syncthreads(); // Move from shared to global memory for (unsigned int binIdx = threadIdx.x; binIdx < num_bins; binIdx += blockDim.x) { atomicAdd(&device_bins[binIdx], device_bins_private[binIdx]); } } // Kernel function for Histogram Computation using shared memory (privatization) and aggregation // Needs some tweaks __global__ void Histogram_GPU_3(unsigned int* device_input, unsigned int* device_bins, unsigned long input_size, unsigned int bin_size, unsigned int num_bins) { // Get thread id unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // Private bins __device__ __shared__ int device_bins_private[SHARED_MEM_SIZE]; // Initialize private bins to 0 for (unsigned int binIdx = threadIdx.x; binIdx < num_bins; binIdx += blockDim.x) { device_bins_private[binIdx] = 0; } __syncthreads(); unsigned int prev_index = -1; // Tracks index of histogram element whose updates have been aggregated (-1 so it won't have chance of matching value in bin) unsigned int accumulator = 0; // Keeps track of number of updates aggregated so far (0 means no updates have been aggregated) // Compute histogram for (unsigned int i = tid; i < input_size; i += blockDim.x * gridDim.x) { if (device_input[i] >= 0 && device_input[i] < 1024) { // Boundary condition unsigned int current_index = device_input[i] / bin_size; if (current_index != prev_index) { // Compare index of histogram element to be updated with index of one currently being aggregated if (accumulator >= 0) { atomicAdd(&device_bins_private[device_input[i] / bin_size], accumulator); // Current and previous are different, so add accumulator to 
value in bin accumulator = 1; prev_index = current_index; } } else { accumulator ++; // Current and previous match so increment accumulator } } } __syncthreads(); // Move from shared to global memory for (unsigned int binIdx = threadIdx.x; binIdx < num_bins; binIdx += blockDim.x) { atomicAdd(&device_bins[binIdx], device_bins_private[binIdx]); } } // Histogram Computation on CPU void Histogram_CPU(unsigned int* host_input, unsigned int input_size, unsigned int bin_size, unsigned int * host_bins) { for (int i = 0; i < input_size; i++) { host_bins[host_input[i] / bin_size]++; } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char* argv[]) { unsigned int num_bins = 0; unsigned long input_size = 0; if (argc == 4) { // 4 arguments expected (filename, -i, <BinNum>, <VecDim>) int bin_exponent = atoi(argv[2]); if (bin_exponent < 2 || bin_exponent > 8) { printf("\nPlease make sure <BinNum> is between 2 and 8.\n"); exit(EXIT_FAILURE); } // Set number of bins num_bins = pow(2, bin_exponent); printf("\nThere will be %d bins \n", num_bins); // Set number of input elements input_size = atoi(argv[3]); printf("Input vector is %d elements long \n", input_size); } else if (argc > 4) { printf("\nToo many arguments provided. \n"); printf("Enter arguments like this: \n"); printf("-i <BinNum> <VecDim> \n"); exit(EXIT_FAILURE); } else { printf("\n3 arguments expected. \n"); printf("Enter arguments like this: \n"); printf("-i <BinNum> <VecDim> \n"); exit(EXIT_FAILURE); } // Set number of elements per bin unsigned int bin_size = 1024 / num_bins; // 1024 is the max possible input element printf("Each bin will contain %u elements\n", bin_size); // Size in bytes of input vector size_t input_bytes = input_size * sizeof(int); // Size in bytes of bins size_t bin_bytes = num_bins * sizeof(int); // Allocate host memory for input vector and bins unsigned int* host_input; unsigned int* host_bins; unsigned int* host_bins_cpu; host_input = (unsigned int*)malloc(input_bytes); host_bins = (unsigned int*)malloc(bin_bytes); host_bins_cpu = (unsigned int*)malloc(bin_bytes); // Allocate device memory for input vector and bins unsigned int* device_input; unsigned int* device_bins; checkCudaErrors(cudaMalloc((void**)&device_input, input_bytes)); checkCudaErrors(cudaMalloc((void**)&device_bins, bin_bytes)); // Initialize input vector with ints between 0~1024 srand((unsigned int)time(NULL)); // Assigns seed to make random numbers change for (int i = 0; i < input_size; i++) { host_input[i] = rand() % 1024; } // Initialize bins with 0s for (int i = 0; i < num_bins; i++) { host_bins[i] = 0; } for (int i = 0; i < num_bins; i++) { host_bins_cpu[i] = 0; } //// Print input vector //printf("\nInput vector:\n"); //for (int i = 0; i < input_size; i++) { // printf("%d\t", host_input[i]); //} // Copy matrix values from host to device checkCudaErrors(cudaMemcpy(device_input, host_input, input_bytes, cudaMemcpyHostToDevice)); // dest, source, size in bytes, direction of transfer checkCudaErrors(cudaMemcpy(device_bins, host_bins, bin_bytes, cudaMemcpyHostToDevice)); // dest, source, size in bytes, direction of transfer // Set Grid and Block sizes int block_size = 16; // Threads per block int grid_size = ceil(input_size / block_size) + 1; // Blocks per grid dim3 dim_block(block_size); dim3 dim_grid(grid_size); // Record the start event (for timing GPU calculations) cudaStream_t stream; cudaEvent_t start, stop; 
checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); checkCudaErrors(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); checkCudaErrors(cudaStreamSynchronize(stream)); checkCudaErrors(cudaEventRecord(start, stream)); int nIter = 100; // How many times to run kernel printf("\nStarting Histogram Computation on GPU\n"); // Launch kernel (repeat nIter times so we can obtain average run time) // Leave the kernel you want to use un-commented and comment out the rest for (int i = 0; i < nIter; i++) { Histogram_GPU_1<<<dim_grid, dim_block>>>(device_input, device_bins, input_size, bin_size); //Histogram_GPU_2<<<dim_grid, dim_block>>>(device_input, device_bins, input_size, bin_size, num_bins); //Histogram_GPU_3<<<dim_grid, dim_block>>>(device_input, device_bins, input_size, bin_size, num_bins); } printf("\nGPU Histogram Computation Complete\n"); // Record the stop event checkCudaErrors(cudaEventRecord(stop, stream)); // Wait for the stop event to complete checkCudaErrors(cudaEventSynchronize(stop)); float msecTotal = 0.0f; checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop)); // Compute and print the performance float msecPerHistogram = msecTotal / nIter; printf("\nGPU Histogram Computation took %.3f msec\n", msecPerHistogram); printf("\nThreads per block = %d, Blocks per grid = %d, Total threads = %d\n", block_size, grid_size, block_size * grid_size); // Copy matrix values from device to host checkCudaErrors(cudaMemcpy(host_bins, device_bins, bin_bytes, cudaMemcpyDeviceToHost)); // Make sure bins from GPU have correct values for (int i = 0; i < num_bins; i++) { host_bins[i] /= nIter; } //// Print GPU results //printf("\nGPU Results: \n"); //for (int i = 0; i < num_bins; i++) { // printf("\nBins %d = %u", i, host_bins[i]); //} //int sum_bins = 0; //for (int i = 0; i < num_bins; i++) { // sum_bins += host_bins[i]; //} //printf("\n\nSummation of all the bins = %d\n", sum_bins); //Start CPU timer double time_taken_cpu = 0.0; clock_t begin_cpu = clock(); // Calculate histogram on CPU printf("\nStarting Histogram Computation on CPU\n"); for (int i = 0; i < nIter; i++) { // Repeat CPU computation same amount of times as GPU computation Histogram_CPU(host_input, input_size, bin_size, host_bins_cpu); } printf("\nCPU Histogram Computation Complete\n"); clock_t end_cpu = clock(); time_taken_cpu += ((double)(end_cpu - begin_cpu) / CLOCKS_PER_SEC * 1000) / nIter; // in milliseconds printf("\nCPU Histogram Computation took %.3f msec\n", time_taken_cpu); // Make sure bins from CPU have correct values for (int i = 0; i < num_bins; i++) { host_bins_cpu[i] /= nIter; } //// Print CPU results //printf("\nCPU Results: \n"); //for (int i = 0; i < num_bins; i++) { // printf("\nBins %d = %u", i, host_bins_cpu[i]); //} //int sum_bins_cpu = 0; //for (int i = 0; i < num_bins; i++) { // sum_bins_cpu += host_bins_cpu[i]; //} //printf("\n\nSummation of all the bins = %d\n", sum_bins_cpu); // Check if GPU and CPU histograms match bool check = 0; for (int i = 0; i < num_bins; i++) { // For every value in the arrays if (host_bins[i] != host_bins_cpu[i]) { // Check if they match and if not set a flag check = 1; } } if (check == 1) { printf("\nGPU and CPU histograms do not match!\n"); } else { printf("\nGPU and CPU histograms match!\n"); } // Free memory in device cudaFree(device_input); cudaFree(device_bins); // Free memory in host free(host_input); free(host_bins); free(host_bins_cpu); }
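// Hypothetical helper (not part of the original program): the launch configuration in
// main() above computes ceil(input_size / block_size) + 1, but the integer division
// truncates before ceil() runs, so an extra padding block is added instead. Explicit
// integer ceiling division avoids the padding block; the helper name and usage below
// are illustrative only.
static inline int ceil_div(unsigned long n, int block_size) {
	// Number of blocks of 'block_size' threads needed to cover 'n' elements
	return (int)((n + block_size - 1) / block_size);
}
// Usage sketch with the variable names used in main() above:
//   int grid_size = ceil_div(input_size, block_size);
//   dim3 dim_grid(grid_size);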
233375d8291a20013f8c213fa9c62313f9243800.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/strings/split/split.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/utilities/error.hpp> #include <strings/utilities.hpp> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <vector> #include <thrust/transform.h> namespace cudf { namespace strings { namespace detail { using string_index_pair = thrust::pair<const char*,size_type>; namespace { /** * @brief Common token counter for all split methods in this file. */ struct token_counter_fn { column_device_view const d_strings; string_view const d_delimiter; size_type tokens; // returns the number of possible tokens in each string __device__ size_type operator()(size_type idx) const { if( d_strings.is_null(idx) ) return 0; string_view d_str = d_strings.element<string_view>(idx); if( d_str.empty() ) return 1; size_type delim_count = 0; auto delim_length = d_delimiter.length(); auto pos = d_str.find(d_delimiter); while(pos >= 0) { ++delim_count; pos = d_str.find(d_delimiter, pos + delim_length); } size_type rtn = delim_count + 1; if((tokens > 0) && (rtn > tokens)) rtn = tokens; return rtn; } }; // // This will create new columns by splitting the array of strings vertically. // All the first tokens go in the first column, all the second tokens go in the second column, etc. // It is comparable to Pandas split with expand=True but the rows/columns are transposed. 
// Example: // import pandas as pd // pd_series = pd.Series(['', None, 'a_b', '_a_b_', '__aa__bb__', '_a__bbb___c', '_aa_b__ccc__']) // print(pd_series.str.split(pat='_', expand=True)) // 0 1 2 3 4 5 6 // 0 '' None None None None None None // 1 None None None None None None None // 2 a b None None None None None // 3 '' a b '' None None None // 4 '' '' aa '' bb '' '' // 5 '' a '' bbb '' '' c // 6 '' aa b '' ccc '' '' // // print(pd_series.str.split(pat='_', n=1, expand=True)) // 0 1 // 0 '' None // 1 None None // 2 a b // 3 '' a_b_ // 4 '' _aa__bb__ // 5 '' a__bbb___c // 6 '' aa_b__ccc__ // // print(pd_series.str.split(pat='_', n=2, expand=True)) // 0 1 2 // 0 '' None None // 1 None None None // 2 a b None // 3 '' a b_ // 4 '' aa__bb__ // 5 '' a _bbb___c // 6 '' aa b__ccc__ // struct split_tokenizer_fn { column_device_view const d_strings; // strings to split string_view const d_delimiter; // delimiter for split __device__ string_index_pair operator()(size_type idx, size_type col_idx, size_type column_count, size_type const* d_token_counts) const { // token_count already includes the max-split value size_type token_count = d_token_counts[idx]; if( col_idx >= token_count || d_strings.is_null(idx) ) return string_index_pair{nullptr,0}; string_view d_str = d_strings.element<string_view>(idx); auto delim_nchars = d_delimiter.length(); size_type spos = 0; size_type nchars = d_str.length(); size_type epos = nchars; // skip delimiters until we reach the col_idx or the token_count for( size_type c=0; c < (token_count-1); ++c ) { epos = d_str.find(d_delimiter,spos); if( c==col_idx ) // found our column break; spos = epos + delim_nchars; epos = nchars; } // this will be the string for this column string_index_pair result{d_str.data(),0}; // init to empty string if( spos < epos ) { spos = d_str.byte_offset(spos); // convert character pos epos = d_str.byte_offset(epos); // to byte offset result = string_index_pair{ d_str.data() + spos, (epos-spos) }; } return result; } }; /** * @brief Extracts a specific set of tokens from a strings column. * * This will perform the split starting at the end of each string. */ struct rsplit_tokenizer_fn { column_device_view const d_strings; // strings to split string_view const d_delimiter; // delimiter for split __device__ string_index_pair operator()(size_type idx, size_type col_idx, size_type column_count, size_type const* d_token_counts) const { // token_count already includes the max-split value size_type token_count = d_token_counts[idx]; if( col_idx >= token_count || d_strings.is_null(idx) ) return string_index_pair{nullptr,0}; string_view d_str = d_strings.element<string_view>(idx); auto delim_nchars = d_delimiter.length(); size_type spos = 0; size_type nchars = d_str.length(); size_type epos = nchars; // skip delimiters until we reach col-idx or token_count for( auto c=(token_count-1); c > 0; --c ) { spos = d_str.rfind(d_delimiter,0,epos); if( c==col_idx ) // found our column { spos += delim_nchars; // do not include delimiter break; } epos = spos; spos = 0; } // this will be the string for this column string_index_pair result{d_str.data(),0}; // init to empty string if( spos < epos ) { spos = d_str.byte_offset(spos); // convert char pos epos = d_str.byte_offset(epos); // to byte offset result = string_index_pair{ d_str.data() + spos, (epos-spos) }; } return result; } }; /** * @brief Special-case token counter for whitespace delimiter. * * Leading and trailing and duplicate delimiters are ignored. 
*/ struct whitespace_token_counter_fn { column_device_view const d_strings; size_type tokens; // maximum number of tokens // count the 'words' only between non-whitespace characters __device__ size_type operator()(size_type idx) const { if( d_strings.is_null(idx) ) return 0; string_view d_str = d_strings.element<string_view>(idx); size_type dcount = 0; bool spaces = true; // need to treat a run of whitespace as a single delimiter auto itr = d_str.begin(); while( itr != d_str.end() ) { char_utf8 ch = *itr; if( spaces == (ch <= ' ') ) itr++; else { dcount += static_cast<size_type>(spaces); spaces = !spaces; } } if( tokens && (dcount > tokens) ) dcount = tokens; if( dcount==0 ) dcount = 1; // always allow empty string return dcount; } }; // // This is the whitespace-delimiter version of the column split function. // Like the one above, it can be compared to Pandas split with expand=True but // with the rows/columns transposed. // // import pandas as pd // pd_series = pd.Series(['', None, 'a b', ' a b ', ' aa bb ', ' a bbb c', ' aa b ccc ']) // print(pd_series.str.split(pat=None, expand=True)) // 0 1 2 // 0 None None None // 1 None None None // 2 a b None // 3 a b None // 4 aa bb None // 5 a bbb c // 6 aa b ccc // // print(pd_series.str.split(pat=None, n=1, expand=True)) // 0 1 // 0 None None // 1 None None // 2 a b // 3 a b // 4 aa bb // 5 a bbb c // 6 aa b ccc // // print(pd_series.str.split(pat=None, n=2, expand=True)) // 0 1 2 // 0 None None None // 1 None None None // 2 a b None // 3 a b None // 4 aa bb None // 5 a bbb c // 6 aa b ccc // // Like the split_record method, there are no empty strings here. // struct whitespace_split_tokenizer_fn { column_device_view const d_strings; // strings to split size_type tokens; // maximum number of tokens __device__ string_index_pair operator()(size_type idx, size_type col_idx, size_type column_count, size_type const* d_token_counts) const { size_type token_count = d_token_counts[idx]; if( col_idx >= token_count || d_strings.is_null(idx) ) return string_index_pair{nullptr,0}; string_view d_str = d_strings.element<string_view>(idx); size_type c = 0; size_type nchars = d_str.length(); size_type spos = 0; size_type epos = nchars; bool spaces = true; // need to treat a run of whitespace as a single delimiter for( size_type pos=0; pos < nchars; ++pos ) { char_utf8 ch = d_str[pos]; if( spaces == (ch <= ' ') ) { if( spaces ) spos = pos+1; else epos = pos+1; continue; } if( !spaces ) { epos = nchars; if( (c+1)==tokens ) // hit max tokens break; epos = pos; if( c==col_idx ) // found our column break; spos = pos+1; epos = nchars; ++c; } spaces = !spaces; } // this is the string for this column string_index_pair result{nullptr,0}; // init to null string if( spos < epos ) { spos = d_str.byte_offset(spos); // convert char pos epos = d_str.byte_offset(epos); // to byte offset result = string_index_pair{ d_str.data() + spos, (epos-spos) }; } return result; } }; /** * @brief Extracts a specific set of tokens from a strings column * using whitespace as delimiter but splitting starts from the end * of each string. 
*/ struct whitespace_rsplit_tokenizer_fn { column_device_view const d_strings; // strings to split size_type tokens; // maximum number of tokens __device__ string_index_pair operator()(size_type idx, size_type col_idx, size_type column_count, size_type const* d_token_counts) const { size_type token_count = d_token_counts[idx]; if( col_idx >= token_count || d_strings.is_null(idx) ) return string_index_pair{nullptr,0}; string_view d_str = d_strings.element<string_view>(idx); size_type c = (token_count-1); size_type nchars = d_str.length(); size_type spos = 0; size_type epos = nchars; bool spaces = true; // need to treat a run of whitespace as a single delimiter for( int pos=nchars; pos > 0; --pos ) { char_utf8 ch = d_str[pos-1]; if( spaces == (ch <= ' ') ) { if( spaces ) epos = pos-1; else spos = pos-1; continue; } if( !spaces ) { spos = 0; if( (column_count-c)==tokens ) // hit max tokens break; spos = pos; if( c==col_idx ) // found our column break; epos = pos-1; spos = 0; --c; } spaces = !spaces; } // this is the string for this column string_index_pair result{nullptr,0}; // init to null string if( spos < epos ) { spos = d_str.byte_offset(spos); // convert char pos epos = d_str.byte_offset(epos); // to byte offset result = string_index_pair{ d_str.data() + spos, (epos-spos) }; } return result; } }; // align all column size allocations to this boundary so that all output column buffers // start at that alignment. static constexpr size_type split_align = 64; __device__ size_type compute_memory_size( size_type token_count, size_type token_size_sum) { return cudf::experimental::detail::round_up_pow2(token_size_sum, split_align) + cudf::experimental::detail::round_up_pow2( (token_count + 1) * static_cast<size_type>(sizeof(size_type)), split_align); } struct copy_info { size_type idx{}; size_type token_count{}; size_type token_size_sum{}; void* memory_ptr{}; }; enum class Dir { FORWARD, BACKWARD }; /** * @brief Compute the number of tokens, the total byte sizes of the tokens, and * required memory size for the `idx'th` string element of `d_strings`. */ template <Dir dir> struct token_reader_fn { column_device_view const d_strings; // strings to split string_view const d_delimiter; // delimiter for split size_type const max_tokens = std::numeric_limits<size_type>::max(); bool const has_validity = false; template <bool last> __device__ size_type compute_token_char_bytes( string_view const& d_str, size_type start_pos, size_type end_pos, size_type delimiter_pos) const { if (last) { return dir == Dir::FORWARD ? d_str.byte_offset(end_pos) - d_str.byte_offset(start_pos) : d_str.byte_offset(end_pos); } else { return dir == Dir::FORWARD ? d_str.byte_offset(delimiter_pos) - d_str.byte_offset(start_pos) : d_str.byte_offset(end_pos) - d_str.byte_offset(delimiter_pos + d_delimiter.length()); } } // returns a tuple of token count, sum of token sizes in bytes, and required // memory block size __device__ thrust::tuple<size_type, size_type, size_type> operator()(size_type idx) const { if (has_validity && d_strings.is_null(idx)) { return thrust::make_tuple<size_type, size_type, size_type>(0, 0, 0); } auto const d_str = d_strings.element<string_view>(idx); size_type token_count = 0; size_type token_size_sum = 0; size_type start_pos = 0; // updates only if moving forward auto end_pos = d_str.length(); // updates only if moving backward while (token_count < max_tokens - 1) { auto const delimiter_pos = dir == Dir::FORWARD ? 
d_str.find(d_delimiter, start_pos) : d_str.rfind(d_delimiter, start_pos, end_pos); if (delimiter_pos != -1) { token_count++; token_size_sum += compute_token_char_bytes<false>( d_str, start_pos, end_pos, delimiter_pos); if (dir == Dir::FORWARD) { start_pos = delimiter_pos + d_delimiter.length(); } else { end_pos = delimiter_pos; } } else { break; } } token_count++; token_size_sum += compute_token_char_bytes<true>(d_str, start_pos, end_pos, -1); auto const memory_size = compute_memory_size(token_count, token_size_sum); return thrust::make_tuple<size_type, size_type, size_type>( token_count, token_size_sum, memory_size); } }; /** * @brief Copy the tokens from the `idx'th` string element of `d_strings` to * the contiguous memory buffer. */ template <Dir dir> struct token_copier_fn { column_device_view const d_strings; // strings to split string_view const d_delimiter; // delimiter for split bool const has_validity = false; template <bool last> __device__ thrust::pair<size_type, size_type> compute_src_byte_offset_and_token_char_bytes( string_view const& d_str, size_type start_pos, size_type end_pos, size_type delimiter_pos) const { if (last) { auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(start_pos) : 0; auto const token_char_bytes = dir == Dir::FORWARD ? d_str.byte_offset(end_pos) - src_byte_offset : d_str.byte_offset(end_pos); return thrust::make_pair<size_type, size_type>( src_byte_offset, token_char_bytes); } else { auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(start_pos) : d_str.byte_offset(delimiter_pos + d_delimiter.length()); auto const token_char_bytes = dir == Dir::FORWARD ? d_str.byte_offset(delimiter_pos) - src_byte_offset : d_str.byte_offset(end_pos) - src_byte_offset; return thrust::make_pair<size_type, size_type>( src_byte_offset, token_char_bytes); } } __device__ void operator()(copy_info const info) const { if (info.token_count == 0) { return; } auto memory_ptr = static_cast<char*>(info.memory_ptr); auto const char_buf_size = cudf::experimental::detail::round_up_pow2(info.token_size_sum, split_align); auto const char_buf_ptr = memory_ptr; memory_ptr += char_buf_size; auto const offset_buf_ptr = reinterpret_cast<size_type*>(memory_ptr); auto const d_str = d_strings.element<string_view>(info.idx); size_type token_idx = 0; size_type char_bytes_copied = 0; size_type start_pos = 0; // updates only if moving forward auto end_pos = d_str.length(); // updates only if moving backward while (token_idx < info.token_count - 1) { auto const delimiter_pos = dir == Dir::FORWARD ? 
d_str.find(d_delimiter, start_pos) : d_str.rfind(d_delimiter, start_pos, end_pos); if (delimiter_pos != -1) { auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<false>( d_str, start_pos, end_pos, delimiter_pos); if (dir == Dir::FORWARD) { thrust::copy( thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { auto const char_buf_offset = info.token_size_sum - char_bytes_copied - offset_size_pair.second; thrust::copy( thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_buf_offset); offset_buf_ptr[info.token_count - 1 - token_idx] = char_buf_offset; } token_idx++; char_bytes_copied += offset_size_pair.second; if (dir == Dir::FORWARD) { start_pos = delimiter_pos + d_delimiter.length(); } else { end_pos = delimiter_pos; } } else { break; } } auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<true>( d_str, start_pos, end_pos, -1); if (dir == Dir::FORWARD) { thrust::copy( thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { thrust::copy(thrust::seq, d_str.data(), d_str.data() + offset_size_pair.second, char_buf_ptr); offset_buf_ptr[0] = 0; } offset_buf_ptr[info.token_count] = info.token_size_sum; } }; /** * @brief Compute the number of tokens, the total byte sizes of the tokens, and * required memory size for the `idx'th` string element of `d_strings`. */ template <Dir dir> struct whitespace_token_reader_fn { column_device_view const d_strings; // strings to split size_type const max_tokens = std::numeric_limits<size_type>::max(); bool const has_validity = false; template <bool last> __device__ size_type compute_token_char_bytes( string_view const& d_str, size_type cur_pos, size_type to_token_pos) const { if (last) { return dir == Dir::FORWARD ? d_str.byte_offset(d_str.length()) - d_str.byte_offset(to_token_pos) : d_str.byte_offset(to_token_pos + 1) - d_str.byte_offset(0); } else { return dir == Dir::FORWARD ? d_str.byte_offset(cur_pos) - d_str.byte_offset(to_token_pos) : d_str.byte_offset(to_token_pos + 1) - d_str.byte_offset(cur_pos + 1); } } __device__ thrust::tuple<size_type, size_type, size_type> operator()(size_type idx) const { if (has_validity && d_strings.is_null(idx)) { return thrust::make_tuple<size_type, size_type, size_type>(0, 0, 0); } auto const d_str = d_strings.element<string_view>(idx); size_type token_count = 0; size_type token_size_sum = 0; auto spaces = true; auto reached_max_tokens = false; size_type to_token_pos = 0; for (size_type i = 0; i < d_str.length(); ++i) { auto const cur_pos = dir == Dir::FORWARD ? i : d_str.length() - 1 - i; auto const ch = d_str[cur_pos]; if (spaces != (ch <= ' ')) { if (spaces) { // from whitespace(s) to a new token to_token_pos = cur_pos; } else { // from a token to whiltespace(s) if (token_count < max_tokens - 1) { token_count++; token_size_sum += compute_token_char_bytes<false>(d_str, cur_pos, to_token_pos); } else { reached_max_tokens = true; break; } } spaces = !spaces; } } if (reached_max_tokens || !spaces) { token_count++; token_size_sum += compute_token_char_bytes<true>(d_str, -1, to_token_pos); } if (token_count == 0) { // note that pandas.Series.str.split("", pat=" ") // returns one token (i.e. 
"") while // pandas.Series.str.split("") returns 0 token. return thrust::make_tuple<size_type, size_type, size_type>(0, 0, 0); } auto const memory_size = compute_memory_size(token_count, token_size_sum); return thrust::make_tuple<size_type, size_type, size_type>( token_count, token_size_sum, memory_size); } }; /** * @brief Copy the tokens from the `idx'th` string element of `d_strings` to * the contiguous memory buffer. */ template <Dir dir> struct whitespace_token_copier_fn { column_device_view const d_strings; // strings to split bool const has_validity = false; template <bool last> __device__ thrust::pair<size_type, size_type> compute_src_byte_offset_and_token_char_bytes( string_view const& d_str, size_type cur_pos, size_type to_token_pos, size_type remaining_bytes) const { if (last) { auto const token_char_bytes = remaining_bytes; auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(to_token_pos) : d_str.byte_offset(to_token_pos + 1) - token_char_bytes; return thrust::make_pair<size_type, size_type>( src_byte_offset, token_char_bytes); } else { auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(to_token_pos) : d_str.byte_offset(cur_pos + 1); auto const token_char_bytes = dir == Dir::FORWARD ? d_str.byte_offset(cur_pos) - src_byte_offset : d_str.byte_offset(to_token_pos + 1) - src_byte_offset; return thrust::make_pair<size_type, size_type>( src_byte_offset, token_char_bytes); } } __device__ void operator()(copy_info const info) const { if (info.token_count == 0) { return; } auto memory_ptr = static_cast<char*>(info.memory_ptr); auto const char_buf_size = cudf::experimental::detail::round_up_pow2(info.token_size_sum, split_align); auto const char_buf_ptr = memory_ptr; memory_ptr += char_buf_size; auto const offset_buf_ptr = reinterpret_cast<size_type*>(memory_ptr); auto const d_str = d_strings.element<string_view>(info.idx); size_type token_idx = 0; size_type char_bytes_copied = 0; auto spaces = true; size_type to_token_pos = 0; for (size_type i = 0; i < d_str.length(); ++i) { auto const cur_pos = dir == Dir::FORWARD ? 
i : d_str.length() - 1 - i; auto const ch = d_str[cur_pos]; if (spaces != (ch <= ' ')) { if (spaces) { // from whitespace(s) to a new token to_token_pos = cur_pos; } else { // from a token to whiltespace(s) if (token_idx < info.token_count - 1) { auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<false>( d_str, cur_pos, to_token_pos, info.token_size_sum - char_bytes_copied); if (dir == Dir::FORWARD) { thrust::copy( thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { auto const char_buf_offset = info.token_size_sum - char_bytes_copied - offset_size_pair.second; thrust::copy( thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_buf_offset); offset_buf_ptr[info.token_count - 1 - token_idx] = char_buf_offset; } token_idx++; char_bytes_copied += offset_size_pair.second; } else { break; } } spaces = !spaces; } } if (token_idx < info.token_count) { auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<true>( d_str, -1, to_token_pos, info.token_size_sum - char_bytes_copied); if (dir == Dir::FORWARD) { thrust::copy( thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { thrust::copy( thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr); offset_buf_ptr[0] = 0; } } offset_buf_ptr[info.token_count] = info.token_size_sum; } }; // Generic split function used by split and rsplit template<typename TokenCounter, typename Tokenizer> std::unique_ptr<experimental::table> split_fn( size_type strings_count, TokenCounter counter, Tokenizer tokenizer, rmm::mr::device_memory_resource* mr, hipStream_t stream ) { auto execpol = rmm::exec_policy(stream); // compute the number of tokens per string size_type columns_count = 0; rmm::device_vector<size_type> token_counts(strings_count); auto d_token_counts = token_counts.data().get(); if( strings_count > 0 ) { thrust::transform( execpol->on(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), d_token_counts, counter ); // column count is the maximum number of tokens for any string columns_count = *thrust::max_element(execpol->on(stream), token_counts.begin(), token_counts.end() ); } std::vector<std::unique_ptr<column>> results; // boundary case: if no columns, return one null column (issue #119) if( columns_count==0 ) { results.push_back(std::make_unique<column>( data_type{STRING}, strings_count, rmm::device_buffer{0,stream,mr}, // no data create_null_mask(strings_count, ALL_NULL, stream, mr), strings_count )); } // Create each column. // Build a vector of pair<char*,int>'s' for each column. // Each pair points to a string for this column for each row. // Create the strings column using the strings factory. 
for( size_type col=0; col < columns_count; ++col ) { rmm::device_vector<string_index_pair> indexes(strings_count); string_index_pair* d_indexes = indexes.data().get(); thrust::transform(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), d_indexes, [tokenizer, col, columns_count, d_token_counts] __device__ (size_type idx) { return tokenizer(idx,col,columns_count, d_token_counts); }); auto column = make_strings_column(indexes,stream,mr); results.emplace_back(std::move(column)); } return std::make_unique<experimental::table>(std::move(results)); } // Generic split function used by split_record and rsplit_record template<typename TokenReader, typename TokenCopier> contiguous_split_record_result contiguous_split_record_fn( strings_column_view const& strings, TokenReader reader, TokenCopier copier, rmm::mr::device_memory_resource* mr, hipStream_t stream) { // read each string element of the input column to count the number of tokens // and compute the memory offsets auto strings_count = strings.size(); rmm::device_vector<size_type> d_token_counts(strings_count); rmm::device_vector<size_type> d_token_size_sums(strings_count); rmm::device_vector<size_type> d_memory_offsets(strings_count + 1); thrust::transform( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(strings_count), thrust::make_zip_iterator( thrust::make_tuple( d_token_counts.begin(), d_token_size_sums.begin(), d_memory_offsets.begin())), reader); thrust::exclusive_scan( rmm::exec_policy(stream)->on(stream), d_memory_offsets.begin(), d_memory_offsets.end(), d_memory_offsets.begin()); // allocate and copy thrust::host_vector<size_type> h_token_counts = d_token_counts; thrust::host_vector<size_type> h_token_size_sums = d_token_size_sums; thrust::host_vector<size_type> h_memory_offsets = d_memory_offsets; auto memory_size = h_memory_offsets.back(); auto all_data_ptr = std::make_unique<rmm::device_buffer>(memory_size, stream, mr); auto d_all_data_ptr = reinterpret_cast<char*>(all_data_ptr->data()); auto d_token_counts_ptr = d_token_counts.data().get(); auto d_memory_offsets_ptr = d_memory_offsets.data().get(); auto d_token_size_sums_ptr = d_token_size_sums.data().get(); auto copy_info_begin = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [d_all_data_ptr, d_token_counts_ptr, d_memory_offsets_ptr, d_token_size_sums_ptr] __device__ (auto i) { return copy_info{i, d_token_counts_ptr[i], d_token_size_sums_ptr[i], d_all_data_ptr + d_memory_offsets_ptr[i]}; }); thrust::for_each( rmm::exec_policy(stream)->on(stream), copy_info_begin, copy_info_begin + strings_count, copier); // update column_view objects std::vector<column_view> column_views{}; for (size_type i = 0; i < strings_count; ++i) { if (h_token_counts[i] == 0) { column_views.emplace_back(strings.parent().type(), 0, nullptr); } else { auto memory_ptr = d_all_data_ptr + h_memory_offsets[i]; auto char_buf_size = cudf::util::round_up_safe(h_token_size_sums[i], split_align); auto char_buf_ptr = memory_ptr; memory_ptr += char_buf_size; auto offset_buf_ptr = reinterpret_cast<size_type*>(memory_ptr); column_views.emplace_back( strings.parent().type(), h_token_counts[i], nullptr, nullptr, UNKNOWN_NULL_COUNT, 0, std::vector<column_view>{ column_view( strings.offsets().type(), h_token_counts[i] + 1, offset_buf_ptr), column_view( strings.chars().type(), h_token_size_sums[i], char_buf_ptr)}); } } CUDA_TRY(hipStreamSynchronize(stream)); return 
contiguous_split_record_result{std::move(column_views), std::move(all_data_ptr)}; } } // namespace std::unique_ptr<experimental::table> split( strings_column_view const& strings, string_scalar const& delimiter = string_scalar(""), size_type maxsplit=-1, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), hipStream_t stream = 0 ) { CUDF_EXPECTS( delimiter.is_valid(), "Parameter delimiter must be valid"); size_type max_tokens = 0; if( maxsplit > 0 ) max_tokens = maxsplit + 1; // makes consistent with Pandas auto strings_column = column_device_view::create(strings.parent(),stream); if( delimiter.size()==0 ) { return split_fn( strings.size(), whitespace_token_counter_fn{*strings_column,max_tokens}, whitespace_split_tokenizer_fn{*strings_column,max_tokens}, mr, stream); } string_view d_delimiter( delimiter.data(), delimiter.size() ); return split_fn( strings.size(), token_counter_fn{*strings_column,d_delimiter,max_tokens}, split_tokenizer_fn{*strings_column,d_delimiter}, mr, stream); } std::unique_ptr<experimental::table> rsplit( strings_column_view const& strings, string_scalar const& delimiter = string_scalar(""), size_type maxsplit=-1, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), hipStream_t stream = 0 ) { CUDF_EXPECTS( delimiter.is_valid(), "Parameter delimiter must be valid"); size_type max_tokens = 0; if( maxsplit > 0 ) max_tokens = maxsplit + 1; // makes consistent with Pandas auto strings_column = column_device_view::create(strings.parent(),stream); if( delimiter.size()==0 ) { return split_fn( strings.size(), whitespace_token_counter_fn{*strings_column,max_tokens}, whitespace_rsplit_tokenizer_fn{*strings_column,max_tokens}, mr, stream); } string_view d_delimiter( delimiter.data(), delimiter.size() ); return split_fn( strings.size(), token_counter_fn{*strings_column,d_delimiter,max_tokens}, rsplit_tokenizer_fn{*strings_column,d_delimiter}, mr, stream); } template <Dir dir> contiguous_split_record_result contiguous_split_record( strings_column_view const& strings, string_scalar const& delimiter = string_scalar(""), size_type maxsplit = -1, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), hipStream_t stream = 0) { CUDF_EXPECTS(delimiter.is_valid(), "Parameter delimiter must be valid"); // makes consistent with Pandas size_type max_tokens = maxsplit > 0 ? 
maxsplit + 1 : std::numeric_limits<size_type>::max(); auto has_validity = strings.parent().nullable(); auto d_strings_column_ptr = column_device_view::create(strings.parent(), stream); if (delimiter.size() == 0) { return contiguous_split_record_fn( strings, whitespace_token_reader_fn<dir>{ *d_strings_column_ptr, max_tokens, has_validity}, whitespace_token_copier_fn<dir>{*d_strings_column_ptr, has_validity}, mr, stream); } else { string_view d_delimiter(delimiter.data(), delimiter.size()); return contiguous_split_record_fn( strings, token_reader_fn<dir>{ *d_strings_column_ptr, d_delimiter, max_tokens, has_validity}, token_copier_fn<dir>{*d_strings_column_ptr, d_delimiter, has_validity}, mr, stream); } } } // namespace detail // external APIs std::unique_ptr<experimental::table> split( strings_column_view const& strings, string_scalar const& delimiter, size_type maxsplit, rmm::mr::device_memory_resource* mr ) { return detail::split( strings, delimiter, maxsplit, mr ); } std::unique_ptr<experimental::table> rsplit( strings_column_view const& strings, string_scalar const& delimiter, size_type maxsplit, rmm::mr::device_memory_resource* mr) { return detail::rsplit( strings, delimiter, maxsplit, mr ); } contiguous_split_record_result contiguous_split_record( strings_column_view const& strings, string_scalar const& delimiter, size_type maxsplit, rmm::mr::device_memory_resource* mr) { return detail::contiguous_split_record<detail::Dir::FORWARD>(strings, delimiter, maxsplit, mr, 0); } contiguous_split_record_result contiguous_rsplit_record( strings_column_view const& strings, string_scalar const& delimiter, size_type maxsplit, rmm::mr::device_memory_resource* mr) { return detail::contiguous_split_record<detail::Dir::BACKWARD>(strings, delimiter, maxsplit, mr, 0); } } // namespace strings } // namespace cudf
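// A minimal usage sketch (illustrative, not part of the original file) for the split()
// API defined above. It assumes the includes at the top of this file, an existing
// strings column owned by the caller, and the default RMM memory resource; the wrapper
// name and the '_' delimiter are hypothetical.
std::unique_ptr<cudf::experimental::table> example_split_on_underscore(
    cudf::strings_column_view const& input)
{
  // Split every string on '_' into at most 3 tokens (maxsplit == 2), producing one
  // output column per token position -- comparable to
  // pandas.Series.str.split(pat='_', n=2, expand=True) as described above.
  return cudf::strings::split(input,
                              cudf::string_scalar("_"),
                              2,
                              rmm::mr::get_default_resource());
}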
233375d8291a20013f8c213fa9c62313f9243800.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/strings/split/split.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/utilities/error.hpp> #include <strings/utilities.hpp> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <vector> #include <thrust/transform.h> namespace cudf { namespace strings { namespace detail { using string_index_pair = thrust::pair<const char*,size_type>; namespace { /** * @brief Common token counter for all split methods in this file. */ struct token_counter_fn { column_device_view const d_strings; string_view const d_delimiter; size_type tokens; // returns the number of possible tokens in each string __device__ size_type operator()(size_type idx) const { if( d_strings.is_null(idx) ) return 0; string_view d_str = d_strings.element<string_view>(idx); if( d_str.empty() ) return 1; size_type delim_count = 0; auto delim_length = d_delimiter.length(); auto pos = d_str.find(d_delimiter); while(pos >= 0) { ++delim_count; pos = d_str.find(d_delimiter, pos + delim_length); } size_type rtn = delim_count + 1; if((tokens > 0) && (rtn > tokens)) rtn = tokens; return rtn; } }; // // This will create new columns by splitting the array of strings vertically. // All the first tokens go in the first column, all the second tokens go in the second column, etc. // It is comparable to Pandas split with expand=True but the rows/columns are transposed. 
// Example: // import pandas as pd // pd_series = pd.Series(['', None, 'a_b', '_a_b_', '__aa__bb__', '_a__bbb___c', '_aa_b__ccc__']) // print(pd_series.str.split(pat='_', expand=True)) // 0 1 2 3 4 5 6 // 0 '' None None None None None None // 1 None None None None None None None // 2 a b None None None None None // 3 '' a b '' None None None // 4 '' '' aa '' bb '' '' // 5 '' a '' bbb '' '' c // 6 '' aa b '' ccc '' '' // // print(pd_series.str.split(pat='_', n=1, expand=True)) // 0 1 // 0 '' None // 1 None None // 2 a b // 3 '' a_b_ // 4 '' _aa__bb__ // 5 '' a__bbb___c // 6 '' aa_b__ccc__ // // print(pd_series.str.split(pat='_', n=2, expand=True)) // 0 1 2 // 0 '' None None // 1 None None None // 2 a b None // 3 '' a b_ // 4 '' aa__bb__ // 5 '' a _bbb___c // 6 '' aa b__ccc__ // struct split_tokenizer_fn { column_device_view const d_strings; // strings to split string_view const d_delimiter; // delimiter for split __device__ string_index_pair operator()(size_type idx, size_type col_idx, size_type column_count, size_type const* d_token_counts) const { // token_count already includes the max-split value size_type token_count = d_token_counts[idx]; if( col_idx >= token_count || d_strings.is_null(idx) ) return string_index_pair{nullptr,0}; string_view d_str = d_strings.element<string_view>(idx); auto delim_nchars = d_delimiter.length(); size_type spos = 0; size_type nchars = d_str.length(); size_type epos = nchars; // skip delimiters until we reach the col_idx or the token_count for( size_type c=0; c < (token_count-1); ++c ) { epos = d_str.find(d_delimiter,spos); if( c==col_idx ) // found our column break; spos = epos + delim_nchars; epos = nchars; } // this will be the string for this column string_index_pair result{d_str.data(),0}; // init to empty string if( spos < epos ) { spos = d_str.byte_offset(spos); // convert character pos epos = d_str.byte_offset(epos); // to byte offset result = string_index_pair{ d_str.data() + spos, (epos-spos) }; } return result; } }; /** * @brief Extracts a specific set of tokens from a strings column. * * This will perform the split starting at the end of each string. */ struct rsplit_tokenizer_fn { column_device_view const d_strings; // strings to split string_view const d_delimiter; // delimiter for split __device__ string_index_pair operator()(size_type idx, size_type col_idx, size_type column_count, size_type const* d_token_counts) const { // token_count already includes the max-split value size_type token_count = d_token_counts[idx]; if( col_idx >= token_count || d_strings.is_null(idx) ) return string_index_pair{nullptr,0}; string_view d_str = d_strings.element<string_view>(idx); auto delim_nchars = d_delimiter.length(); size_type spos = 0; size_type nchars = d_str.length(); size_type epos = nchars; // skip delimiters until we reach col-idx or token_count for( auto c=(token_count-1); c > 0; --c ) { spos = d_str.rfind(d_delimiter,0,epos); if( c==col_idx ) // found our column { spos += delim_nchars; // do not include delimiter break; } epos = spos; spos = 0; } // this will be the string for this column string_index_pair result{d_str.data(),0}; // init to empty string if( spos < epos ) { spos = d_str.byte_offset(spos); // convert char pos epos = d_str.byte_offset(epos); // to byte offset result = string_index_pair{ d_str.data() + spos, (epos-spos) }; } return result; } }; /** * @brief Special-case token counter for whitespace delimiter. * * Leading and trailing and duplicate delimiters are ignored. 
*/ struct whitespace_token_counter_fn { column_device_view const d_strings; size_type tokens; // maximum number of tokens // count the 'words' only between non-whitespace characters __device__ size_type operator()(size_type idx) const { if( d_strings.is_null(idx) ) return 0; string_view d_str = d_strings.element<string_view>(idx); size_type dcount = 0; bool spaces = true; // need to treat a run of whitespace as a single delimiter auto itr = d_str.begin(); while( itr != d_str.end() ) { char_utf8 ch = *itr; if( spaces == (ch <= ' ') ) itr++; else { dcount += static_cast<size_type>(spaces); spaces = !spaces; } } if( tokens && (dcount > tokens) ) dcount = tokens; if( dcount==0 ) dcount = 1; // always allow empty string return dcount; } }; // // This is the whitespace-delimiter version of the column split function. // Like the one above, it can be compared to Pandas split with expand=True but // with the rows/columns transposed. // // import pandas as pd // pd_series = pd.Series(['', None, 'a b', ' a b ', ' aa bb ', ' a bbb c', ' aa b ccc ']) // print(pd_series.str.split(pat=None, expand=True)) // 0 1 2 // 0 None None None // 1 None None None // 2 a b None // 3 a b None // 4 aa bb None // 5 a bbb c // 6 aa b ccc // // print(pd_series.str.split(pat=None, n=1, expand=True)) // 0 1 // 0 None None // 1 None None // 2 a b // 3 a b // 4 aa bb // 5 a bbb c // 6 aa b ccc // // print(pd_series.str.split(pat=None, n=2, expand=True)) // 0 1 2 // 0 None None None // 1 None None None // 2 a b None // 3 a b None // 4 aa bb None // 5 a bbb c // 6 aa b ccc // // Like the split_record method, there are no empty strings here. // struct whitespace_split_tokenizer_fn { column_device_view const d_strings; // strings to split size_type tokens; // maximum number of tokens __device__ string_index_pair operator()(size_type idx, size_type col_idx, size_type column_count, size_type const* d_token_counts) const { size_type token_count = d_token_counts[idx]; if( col_idx >= token_count || d_strings.is_null(idx) ) return string_index_pair{nullptr,0}; string_view d_str = d_strings.element<string_view>(idx); size_type c = 0; size_type nchars = d_str.length(); size_type spos = 0; size_type epos = nchars; bool spaces = true; // need to treat a run of whitespace as a single delimiter for( size_type pos=0; pos < nchars; ++pos ) { char_utf8 ch = d_str[pos]; if( spaces == (ch <= ' ') ) { if( spaces ) spos = pos+1; else epos = pos+1; continue; } if( !spaces ) { epos = nchars; if( (c+1)==tokens ) // hit max tokens break; epos = pos; if( c==col_idx ) // found our column break; spos = pos+1; epos = nchars; ++c; } spaces = !spaces; } // this is the string for this column string_index_pair result{nullptr,0}; // init to null string if( spos < epos ) { spos = d_str.byte_offset(spos); // convert char pos epos = d_str.byte_offset(epos); // to byte offset result = string_index_pair{ d_str.data() + spos, (epos-spos) }; } return result; } }; /** * @brief Extracts a specific set of tokens from a strings column * using whitespace as delimiter but splitting starts from the end * of each string. 
*/ struct whitespace_rsplit_tokenizer_fn { column_device_view const d_strings; // strings to split size_type tokens; // maximum number of tokens __device__ string_index_pair operator()(size_type idx, size_type col_idx, size_type column_count, size_type const* d_token_counts) const { size_type token_count = d_token_counts[idx]; if( col_idx >= token_count || d_strings.is_null(idx) ) return string_index_pair{nullptr,0}; string_view d_str = d_strings.element<string_view>(idx); size_type c = (token_count-1); size_type nchars = d_str.length(); size_type spos = 0; size_type epos = nchars; bool spaces = true; // need to treat a run of whitespace as a single delimiter for( int pos=nchars; pos > 0; --pos ) { char_utf8 ch = d_str[pos-1]; if( spaces == (ch <= ' ') ) { if( spaces ) epos = pos-1; else spos = pos-1; continue; } if( !spaces ) { spos = 0; if( (column_count-c)==tokens ) // hit max tokens break; spos = pos; if( c==col_idx ) // found our column break; epos = pos-1; spos = 0; --c; } spaces = !spaces; } // this is the string for this column string_index_pair result{nullptr,0}; // init to null string if( spos < epos ) { spos = d_str.byte_offset(spos); // convert char pos epos = d_str.byte_offset(epos); // to byte offset result = string_index_pair{ d_str.data() + spos, (epos-spos) }; } return result; } }; // align all column size allocations to this boundary so that all output column buffers // start at that alignment. static constexpr size_type split_align = 64; __device__ size_type compute_memory_size( size_type token_count, size_type token_size_sum) { return cudf::experimental::detail::round_up_pow2(token_size_sum, split_align) + cudf::experimental::detail::round_up_pow2( (token_count + 1) * static_cast<size_type>(sizeof(size_type)), split_align); } struct copy_info { size_type idx{}; size_type token_count{}; size_type token_size_sum{}; void* memory_ptr{}; }; enum class Dir { FORWARD, BACKWARD }; /** * @brief Compute the number of tokens, the total byte sizes of the tokens, and * required memory size for the `idx'th` string element of `d_strings`. */ template <Dir dir> struct token_reader_fn { column_device_view const d_strings; // strings to split string_view const d_delimiter; // delimiter for split size_type const max_tokens = std::numeric_limits<size_type>::max(); bool const has_validity = false; template <bool last> __device__ size_type compute_token_char_bytes( string_view const& d_str, size_type start_pos, size_type end_pos, size_type delimiter_pos) const { if (last) { return dir == Dir::FORWARD ? d_str.byte_offset(end_pos) - d_str.byte_offset(start_pos) : d_str.byte_offset(end_pos); } else { return dir == Dir::FORWARD ? d_str.byte_offset(delimiter_pos) - d_str.byte_offset(start_pos) : d_str.byte_offset(end_pos) - d_str.byte_offset(delimiter_pos + d_delimiter.length()); } } // returns a tuple of token count, sum of token sizes in bytes, and required // memory block size __device__ thrust::tuple<size_type, size_type, size_type> operator()(size_type idx) const { if (has_validity && d_strings.is_null(idx)) { return thrust::make_tuple<size_type, size_type, size_type>(0, 0, 0); } auto const d_str = d_strings.element<string_view>(idx); size_type token_count = 0; size_type token_size_sum = 0; size_type start_pos = 0; // updates only if moving forward auto end_pos = d_str.length(); // updates only if moving backward while (token_count < max_tokens - 1) { auto const delimiter_pos = dir == Dir::FORWARD ? 
d_str.find(d_delimiter, start_pos) : d_str.rfind(d_delimiter, start_pos, end_pos); if (delimiter_pos != -1) { token_count++; token_size_sum += compute_token_char_bytes<false>( d_str, start_pos, end_pos, delimiter_pos); if (dir == Dir::FORWARD) { start_pos = delimiter_pos + d_delimiter.length(); } else { end_pos = delimiter_pos; } } else { break; } } token_count++; token_size_sum += compute_token_char_bytes<true>(d_str, start_pos, end_pos, -1); auto const memory_size = compute_memory_size(token_count, token_size_sum); return thrust::make_tuple<size_type, size_type, size_type>( token_count, token_size_sum, memory_size); } }; /** * @brief Copy the tokens from the `idx'th` string element of `d_strings` to * the contiguous memory buffer. */ template <Dir dir> struct token_copier_fn { column_device_view const d_strings; // strings to split string_view const d_delimiter; // delimiter for split bool const has_validity = false; template <bool last> __device__ thrust::pair<size_type, size_type> compute_src_byte_offset_and_token_char_bytes( string_view const& d_str, size_type start_pos, size_type end_pos, size_type delimiter_pos) const { if (last) { auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(start_pos) : 0; auto const token_char_bytes = dir == Dir::FORWARD ? d_str.byte_offset(end_pos) - src_byte_offset : d_str.byte_offset(end_pos); return thrust::make_pair<size_type, size_type>( src_byte_offset, token_char_bytes); } else { auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(start_pos) : d_str.byte_offset(delimiter_pos + d_delimiter.length()); auto const token_char_bytes = dir == Dir::FORWARD ? d_str.byte_offset(delimiter_pos) - src_byte_offset : d_str.byte_offset(end_pos) - src_byte_offset; return thrust::make_pair<size_type, size_type>( src_byte_offset, token_char_bytes); } } __device__ void operator()(copy_info const info) const { if (info.token_count == 0) { return; } auto memory_ptr = static_cast<char*>(info.memory_ptr); auto const char_buf_size = cudf::experimental::detail::round_up_pow2(info.token_size_sum, split_align); auto const char_buf_ptr = memory_ptr; memory_ptr += char_buf_size; auto const offset_buf_ptr = reinterpret_cast<size_type*>(memory_ptr); auto const d_str = d_strings.element<string_view>(info.idx); size_type token_idx = 0; size_type char_bytes_copied = 0; size_type start_pos = 0; // updates only if moving forward auto end_pos = d_str.length(); // updates only if moving backward while (token_idx < info.token_count - 1) { auto const delimiter_pos = dir == Dir::FORWARD ? 
d_str.find(d_delimiter, start_pos) : d_str.rfind(d_delimiter, start_pos, end_pos); if (delimiter_pos != -1) { auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<false>( d_str, start_pos, end_pos, delimiter_pos); if (dir == Dir::FORWARD) { thrust::copy( thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { auto const char_buf_offset = info.token_size_sum - char_bytes_copied - offset_size_pair.second; thrust::copy( thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_buf_offset); offset_buf_ptr[info.token_count - 1 - token_idx] = char_buf_offset; } token_idx++; char_bytes_copied += offset_size_pair.second; if (dir == Dir::FORWARD) { start_pos = delimiter_pos + d_delimiter.length(); } else { end_pos = delimiter_pos; } } else { break; } } auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<true>( d_str, start_pos, end_pos, -1); if (dir == Dir::FORWARD) { thrust::copy( thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { thrust::copy(thrust::seq, d_str.data(), d_str.data() + offset_size_pair.second, char_buf_ptr); offset_buf_ptr[0] = 0; } offset_buf_ptr[info.token_count] = info.token_size_sum; } }; /** * @brief Compute the number of tokens, the total byte sizes of the tokens, and * required memory size for the `idx'th` string element of `d_strings`. */ template <Dir dir> struct whitespace_token_reader_fn { column_device_view const d_strings; // strings to split size_type const max_tokens = std::numeric_limits<size_type>::max(); bool const has_validity = false; template <bool last> __device__ size_type compute_token_char_bytes( string_view const& d_str, size_type cur_pos, size_type to_token_pos) const { if (last) { return dir == Dir::FORWARD ? d_str.byte_offset(d_str.length()) - d_str.byte_offset(to_token_pos) : d_str.byte_offset(to_token_pos + 1) - d_str.byte_offset(0); } else { return dir == Dir::FORWARD ? d_str.byte_offset(cur_pos) - d_str.byte_offset(to_token_pos) : d_str.byte_offset(to_token_pos + 1) - d_str.byte_offset(cur_pos + 1); } } __device__ thrust::tuple<size_type, size_type, size_type> operator()(size_type idx) const { if (has_validity && d_strings.is_null(idx)) { return thrust::make_tuple<size_type, size_type, size_type>(0, 0, 0); } auto const d_str = d_strings.element<string_view>(idx); size_type token_count = 0; size_type token_size_sum = 0; auto spaces = true; auto reached_max_tokens = false; size_type to_token_pos = 0; for (size_type i = 0; i < d_str.length(); ++i) { auto const cur_pos = dir == Dir::FORWARD ? i : d_str.length() - 1 - i; auto const ch = d_str[cur_pos]; if (spaces != (ch <= ' ')) { if (spaces) { // from whitespace(s) to a new token to_token_pos = cur_pos; } else { // from a token to whiltespace(s) if (token_count < max_tokens - 1) { token_count++; token_size_sum += compute_token_char_bytes<false>(d_str, cur_pos, to_token_pos); } else { reached_max_tokens = true; break; } } spaces = !spaces; } } if (reached_max_tokens || !spaces) { token_count++; token_size_sum += compute_token_char_bytes<true>(d_str, -1, to_token_pos); } if (token_count == 0) { // note that pandas.Series.str.split("", pat=" ") // returns one token (i.e. 
"") while // pandas.Series.str.split("") returns 0 token. return thrust::make_tuple<size_type, size_type, size_type>(0, 0, 0); } auto const memory_size = compute_memory_size(token_count, token_size_sum); return thrust::make_tuple<size_type, size_type, size_type>( token_count, token_size_sum, memory_size); } }; /** * @brief Copy the tokens from the `idx'th` string element of `d_strings` to * the contiguous memory buffer. */ template <Dir dir> struct whitespace_token_copier_fn { column_device_view const d_strings; // strings to split bool const has_validity = false; template <bool last> __device__ thrust::pair<size_type, size_type> compute_src_byte_offset_and_token_char_bytes( string_view const& d_str, size_type cur_pos, size_type to_token_pos, size_type remaining_bytes) const { if (last) { auto const token_char_bytes = remaining_bytes; auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(to_token_pos) : d_str.byte_offset(to_token_pos + 1) - token_char_bytes; return thrust::make_pair<size_type, size_type>( src_byte_offset, token_char_bytes); } else { auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(to_token_pos) : d_str.byte_offset(cur_pos + 1); auto const token_char_bytes = dir == Dir::FORWARD ? d_str.byte_offset(cur_pos) - src_byte_offset : d_str.byte_offset(to_token_pos + 1) - src_byte_offset; return thrust::make_pair<size_type, size_type>( src_byte_offset, token_char_bytes); } } __device__ void operator()(copy_info const info) const { if (info.token_count == 0) { return; } auto memory_ptr = static_cast<char*>(info.memory_ptr); auto const char_buf_size = cudf::experimental::detail::round_up_pow2(info.token_size_sum, split_align); auto const char_buf_ptr = memory_ptr; memory_ptr += char_buf_size; auto const offset_buf_ptr = reinterpret_cast<size_type*>(memory_ptr); auto const d_str = d_strings.element<string_view>(info.idx); size_type token_idx = 0; size_type char_bytes_copied = 0; auto spaces = true; size_type to_token_pos = 0; for (size_type i = 0; i < d_str.length(); ++i) { auto const cur_pos = dir == Dir::FORWARD ? 
i : d_str.length() - 1 - i; auto const ch = d_str[cur_pos]; if (spaces != (ch <= ' ')) { if (spaces) { // from whitespace(s) to a new token to_token_pos = cur_pos; } else { // from a token to whiltespace(s) if (token_idx < info.token_count - 1) { auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<false>( d_str, cur_pos, to_token_pos, info.token_size_sum - char_bytes_copied); if (dir == Dir::FORWARD) { thrust::copy( thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { auto const char_buf_offset = info.token_size_sum - char_bytes_copied - offset_size_pair.second; thrust::copy( thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_buf_offset); offset_buf_ptr[info.token_count - 1 - token_idx] = char_buf_offset; } token_idx++; char_bytes_copied += offset_size_pair.second; } else { break; } } spaces = !spaces; } } if (token_idx < info.token_count) { auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<true>( d_str, -1, to_token_pos, info.token_size_sum - char_bytes_copied); if (dir == Dir::FORWARD) { thrust::copy( thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { thrust::copy( thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr); offset_buf_ptr[0] = 0; } } offset_buf_ptr[info.token_count] = info.token_size_sum; } }; // Generic split function used by split and rsplit template<typename TokenCounter, typename Tokenizer> std::unique_ptr<experimental::table> split_fn( size_type strings_count, TokenCounter counter, Tokenizer tokenizer, rmm::mr::device_memory_resource* mr, cudaStream_t stream ) { auto execpol = rmm::exec_policy(stream); // compute the number of tokens per string size_type columns_count = 0; rmm::device_vector<size_type> token_counts(strings_count); auto d_token_counts = token_counts.data().get(); if( strings_count > 0 ) { thrust::transform( execpol->on(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), d_token_counts, counter ); // column count is the maximum number of tokens for any string columns_count = *thrust::max_element(execpol->on(stream), token_counts.begin(), token_counts.end() ); } std::vector<std::unique_ptr<column>> results; // boundary case: if no columns, return one null column (issue #119) if( columns_count==0 ) { results.push_back(std::make_unique<column>( data_type{STRING}, strings_count, rmm::device_buffer{0,stream,mr}, // no data create_null_mask(strings_count, ALL_NULL, stream, mr), strings_count )); } // Create each column. // Build a vector of pair<char*,int>'s' for each column. // Each pair points to a string for this column for each row. // Create the strings column using the strings factory. 
for( size_type col=0; col < columns_count; ++col ) { rmm::device_vector<string_index_pair> indexes(strings_count); string_index_pair* d_indexes = indexes.data().get(); thrust::transform(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), d_indexes, [tokenizer, col, columns_count, d_token_counts] __device__ (size_type idx) { return tokenizer(idx,col,columns_count, d_token_counts); }); auto column = make_strings_column(indexes,stream,mr); results.emplace_back(std::move(column)); } return std::make_unique<experimental::table>(std::move(results)); } // Generic split function used by split_record and rsplit_record template<typename TokenReader, typename TokenCopier> contiguous_split_record_result contiguous_split_record_fn( strings_column_view const& strings, TokenReader reader, TokenCopier copier, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { // read each string element of the input column to count the number of tokens // and compute the memory offsets auto strings_count = strings.size(); rmm::device_vector<size_type> d_token_counts(strings_count); rmm::device_vector<size_type> d_token_size_sums(strings_count); rmm::device_vector<size_type> d_memory_offsets(strings_count + 1); thrust::transform( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(strings_count), thrust::make_zip_iterator( thrust::make_tuple( d_token_counts.begin(), d_token_size_sums.begin(), d_memory_offsets.begin())), reader); thrust::exclusive_scan( rmm::exec_policy(stream)->on(stream), d_memory_offsets.begin(), d_memory_offsets.end(), d_memory_offsets.begin()); // allocate and copy thrust::host_vector<size_type> h_token_counts = d_token_counts; thrust::host_vector<size_type> h_token_size_sums = d_token_size_sums; thrust::host_vector<size_type> h_memory_offsets = d_memory_offsets; auto memory_size = h_memory_offsets.back(); auto all_data_ptr = std::make_unique<rmm::device_buffer>(memory_size, stream, mr); auto d_all_data_ptr = reinterpret_cast<char*>(all_data_ptr->data()); auto d_token_counts_ptr = d_token_counts.data().get(); auto d_memory_offsets_ptr = d_memory_offsets.data().get(); auto d_token_size_sums_ptr = d_token_size_sums.data().get(); auto copy_info_begin = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [d_all_data_ptr, d_token_counts_ptr, d_memory_offsets_ptr, d_token_size_sums_ptr] __device__ (auto i) { return copy_info{i, d_token_counts_ptr[i], d_token_size_sums_ptr[i], d_all_data_ptr + d_memory_offsets_ptr[i]}; }); thrust::for_each( rmm::exec_policy(stream)->on(stream), copy_info_begin, copy_info_begin + strings_count, copier); // update column_view objects std::vector<column_view> column_views{}; for (size_type i = 0; i < strings_count; ++i) { if (h_token_counts[i] == 0) { column_views.emplace_back(strings.parent().type(), 0, nullptr); } else { auto memory_ptr = d_all_data_ptr + h_memory_offsets[i]; auto char_buf_size = cudf::util::round_up_safe(h_token_size_sums[i], split_align); auto char_buf_ptr = memory_ptr; memory_ptr += char_buf_size; auto offset_buf_ptr = reinterpret_cast<size_type*>(memory_ptr); column_views.emplace_back( strings.parent().type(), h_token_counts[i], nullptr, nullptr, UNKNOWN_NULL_COUNT, 0, std::vector<column_view>{ column_view( strings.offsets().type(), h_token_counts[i] + 1, offset_buf_ptr), column_view( strings.chars().type(), h_token_size_sums[i], char_buf_ptr)}); } } CUDA_TRY(cudaStreamSynchronize(stream)); return 
contiguous_split_record_result{std::move(column_views), std::move(all_data_ptr)}; } } // namespace std::unique_ptr<experimental::table> split( strings_column_view const& strings, string_scalar const& delimiter = string_scalar(""), size_type maxsplit=-1, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), cudaStream_t stream = 0 ) { CUDF_EXPECTS( delimiter.is_valid(), "Parameter delimiter must be valid"); size_type max_tokens = 0; if( maxsplit > 0 ) max_tokens = maxsplit + 1; // makes consistent with Pandas auto strings_column = column_device_view::create(strings.parent(),stream); if( delimiter.size()==0 ) { return split_fn( strings.size(), whitespace_token_counter_fn{*strings_column,max_tokens}, whitespace_split_tokenizer_fn{*strings_column,max_tokens}, mr, stream); } string_view d_delimiter( delimiter.data(), delimiter.size() ); return split_fn( strings.size(), token_counter_fn{*strings_column,d_delimiter,max_tokens}, split_tokenizer_fn{*strings_column,d_delimiter}, mr, stream); } std::unique_ptr<experimental::table> rsplit( strings_column_view const& strings, string_scalar const& delimiter = string_scalar(""), size_type maxsplit=-1, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), cudaStream_t stream = 0 ) { CUDF_EXPECTS( delimiter.is_valid(), "Parameter delimiter must be valid"); size_type max_tokens = 0; if( maxsplit > 0 ) max_tokens = maxsplit + 1; // makes consistent with Pandas auto strings_column = column_device_view::create(strings.parent(),stream); if( delimiter.size()==0 ) { return split_fn( strings.size(), whitespace_token_counter_fn{*strings_column,max_tokens}, whitespace_rsplit_tokenizer_fn{*strings_column,max_tokens}, mr, stream); } string_view d_delimiter( delimiter.data(), delimiter.size() ); return split_fn( strings.size(), token_counter_fn{*strings_column,d_delimiter,max_tokens}, rsplit_tokenizer_fn{*strings_column,d_delimiter}, mr, stream); } template <Dir dir> contiguous_split_record_result contiguous_split_record( strings_column_view const& strings, string_scalar const& delimiter = string_scalar(""), size_type maxsplit = -1, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), cudaStream_t stream = 0) { CUDF_EXPECTS(delimiter.is_valid(), "Parameter delimiter must be valid"); // makes consistent with Pandas size_type max_tokens = maxsplit > 0 ? 
maxsplit + 1 : std::numeric_limits<size_type>::max(); auto has_validity = strings.parent().nullable(); auto d_strings_column_ptr = column_device_view::create(strings.parent(), stream); if (delimiter.size() == 0) { return contiguous_split_record_fn( strings, whitespace_token_reader_fn<dir>{ *d_strings_column_ptr, max_tokens, has_validity}, whitespace_token_copier_fn<dir>{*d_strings_column_ptr, has_validity}, mr, stream); } else { string_view d_delimiter(delimiter.data(), delimiter.size()); return contiguous_split_record_fn( strings, token_reader_fn<dir>{ *d_strings_column_ptr, d_delimiter, max_tokens, has_validity}, token_copier_fn<dir>{*d_strings_column_ptr, d_delimiter, has_validity}, mr, stream); } } } // namespace detail // external APIs std::unique_ptr<experimental::table> split( strings_column_view const& strings, string_scalar const& delimiter, size_type maxsplit, rmm::mr::device_memory_resource* mr ) { return detail::split( strings, delimiter, maxsplit, mr ); } std::unique_ptr<experimental::table> rsplit( strings_column_view const& strings, string_scalar const& delimiter, size_type maxsplit, rmm::mr::device_memory_resource* mr) { return detail::rsplit( strings, delimiter, maxsplit, mr ); } contiguous_split_record_result contiguous_split_record( strings_column_view const& strings, string_scalar const& delimiter, size_type maxsplit, rmm::mr::device_memory_resource* mr) { return detail::contiguous_split_record<detail::Dir::FORWARD>(strings, delimiter, maxsplit, mr, 0); } contiguous_split_record_result contiguous_rsplit_record( strings_column_view const& strings, string_scalar const& delimiter, size_type maxsplit, rmm::mr::device_memory_resource* mr) { return detail::contiguous_split_record<detail::Dir::BACKWARD>(strings, delimiter, maxsplit, mr, 0); } } // namespace strings } // namespace cudf
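// The contiguous_split_record path above sizes one allocation per string column by writing
// per-row byte requirements into a vector of length N+1 and exclusive-scanning it, so entry i
// becomes row i's start offset and the last entry becomes the total allocation size. The sketch
// below reproduces just that pattern with made-up sizes for four rows; the numbers and names are
// illustrative only and are not part of the cuDF API.
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cstdio>

int main() {
  // hypothetical per-row byte requirements (the reader functor above computes these)
  thrust::device_vector<int> sizes(4 + 1, 0);
  sizes[0] = 16; sizes[1] = 0; sizes[2] = 32; sizes[3] = 8;   // sizes[4] is never read
  // in-place exclusive scan: sizes[i] becomes the sum of the first i inputs
  thrust::exclusive_scan(sizes.begin(), sizes.end(), sizes.begin());
  int total = sizes.back();            // 56 bytes: one buffer can hold every row
  std::printf("offsets: %d %d %d %d, total = %d\n",
              (int)sizes[0], (int)sizes[1], (int)sizes[2], (int)sizes[3], total);
  return 0;
}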
63eb8b2d57b7f0083303db5f184757e4f6138527.hip
// !!! This is a file automatically generated by hipify!!! #include "transpose_cuda.cuh" #include "transpose_ispc.h" #include <cuda_utils.cuh> #include <iostream> #include <numeric> #define uint size_t template <typename T> void fill_matrix(T *A, uint m, uint n) { std::iota(A, A + m * n, 0); } template <typename T> void executeCUDA(T *A, T *cuda_A, size_t N) { int *d_A = nullptr, *d_out_A = nullptr; uint nbytes = sizeof(T) * N * N; cudaCheck(hipMalloc((void **)&d_A, nbytes)); cudaCheck(hipMalloc((void **)&d_out_A, nbytes)); cudaCheck(hipMemcpy(d_A, A, nbytes, hipMemcpyHostToDevice)); uint threads = N > 1024 ? 1024 : N; uint blocks = N > 1024 ? N / 1024 + 1 : 1; // transpose_parallel_per_element<<<blocks, threads>>>(d_A, d_out_A, N, // 32); cudaCheckLaunch(transpose_parallel_per_element, blocks, threads, d_A, d_out_A, N, 32); cudaCheck(hipMemcpy(cuda_A, d_out_A, nbytes, hipMemcpyDeviceToHost)); hipFree(d_A); hipFree(d_out_A); } template <typename T> void executeISPC(T *A, T *ispc_A, uint N) { uint16_t threads = N > 1024 ? 1024 : N; uint16_t blocks = N > 1024 ? N / 1024 + 1 : 1; ispc::Dim3 grid{blocks, 1, 1}; ispc::Dim3 block{threads, 1, 1}; ispc::transpose_parallel_per_element_ispc(grid, block, A, ispc_A, N, 32); } template <typename T> void compareResults(T *A, T *B, uint N) { for (uint i = 0; i < N; i++) { for (uint j = 0; j < N; j++) { if (A[i * N + j] != B[i * N + j]) { std::cerr << "error at (i, j)=(" << i << ", " << j << ")\n"; } } } } int main(int argc, char *argv[]) { size_t N = 32; if (argc == 2) { N = strtoul(argv[argc - 1], nullptr, 10); } int *A = new int[N * N], *cuda_A = new int[N * N], *ispc_A = new int[N * N]; executeCUDA(A, cuda_A, N); executeISPC(A, ispc_A, N); compareResults(cuda_A, ispc_A, N); return 0; }
63eb8b2d57b7f0083303db5f184757e4f6138527.cu
#include "transpose_cuda.cuh" #include "transpose_ispc.h" #include <cuda_utils.cuh> #include <iostream> #include <numeric> #define uint size_t template <typename T> void fill_matrix(T *A, uint m, uint n) { std::iota(A, A + m * n, 0); } template <typename T> void executeCUDA(T *A, T *cuda_A, size_t N) { int *d_A = nullptr, *d_out_A = nullptr; uint nbytes = sizeof(T) * N * N; cudaCheck(cudaMalloc((void **)&d_A, nbytes)); cudaCheck(cudaMalloc((void **)&d_out_A, nbytes)); cudaCheck(cudaMemcpy(d_A, A, nbytes, cudaMemcpyHostToDevice)); uint threads = N > 1024 ? 1024 : N; uint blocks = N > 1024 ? N / 1024 + 1 : 1; // transpose_parallel_per_element<<<blocks, threads>>>(d_A, d_out_A, N, // 32); cudaCheckLaunch(transpose_parallel_per_element, blocks, threads, d_A, d_out_A, N, 32); cudaCheck(cudaMemcpy(cuda_A, d_out_A, nbytes, cudaMemcpyDeviceToHost)); cudaFree(d_A); cudaFree(d_out_A); } template <typename T> void executeISPC(T *A, T *ispc_A, uint N) { uint16_t threads = N > 1024 ? 1024 : N; uint16_t blocks = N > 1024 ? N / 1024 + 1 : 1; ispc::Dim3 grid{blocks, 1, 1}; ispc::Dim3 block{threads, 1, 1}; ispc::transpose_parallel_per_element_ispc(grid, block, A, ispc_A, N, 32); } template <typename T> void compareResults(T *A, T *B, uint N) { for (uint i = 0; i < N; i++) { for (uint j = 0; j < N; j++) { if (A[i * N + j] != B[i * N + j]) { std::cerr << "error at (i, j)=(" << i << ", " << j << ")\n"; } } } } int main(int argc, char *argv[]) { size_t N = 32; if (argc == 2) { N = strtoul(argv[argc - 1], nullptr, 10); } int *A = new int[N * N], *cuda_A = new int[N * N], *ispc_A = new int[N * N]; executeCUDA(A, cuda_A, N); executeISPC(A, ispc_A, N); compareResults(cuda_A, ispc_A, N); return 0; }
7bd289e10ab718801c71f254570f99c1430f266d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/dropout_op.h" #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { __global__ void DropoutKernel(const int N, const float ratio, const float* Xdata, float* Ydata, bool* maskdata) { const float scale = 1. / (1. - ratio); CUDA_1D_KERNEL_LOOP(i, N) { maskdata[i] = (Ydata[i] > ratio); Ydata[i] = Xdata[i] * scale * maskdata[i]; } } } // namespace template <> bool DropoutOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); auto* mask = Output(1); Y->Reshape(X.dims()); mask->Reshape(X.dims()); CAFFE_DCHECK_GT(X.size(), 0); if (is_test_) { context_.Copy<float, CUDAContext, CUDAContext>( X.size(), X.data<float>(), Y->mutable_data<float>()); return true; } else { // We do a simple trick here: since hiprand cannot generate random // boolean numbers, we will generate into dY and write the result to // mask. float* Ydata = Y->mutable_data<float>(); CURAND_CHECK(hiprandGenerateUniform( context_.curand_generator(), Ydata, X.size())); hipLaunchKernelGGL(( DropoutKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), ratio_, X.data<float>(), Ydata, mask->mutable_data<bool>()); return true; } } namespace { __global__ void DropoutGradientKernel(const int N, const float* dYdata, const bool* maskdata, const float scale, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { dXdata[i] = dYdata[i] * maskdata[i] * scale; } } } // namespace template <> bool DropoutGradientOp<float, CUDAContext>::RunOnDevice() { auto& dY = Input(0); auto& mask = Input(1); auto* dX = Output(0); CAFFE_DCHECK_GT(dY.size(), 0); CAFFE_DCHECK_EQ(dY.size(), mask.size()); dX->Reshape(dY.dims()); if (is_test_) { context_.Copy<float, CUDAContext, CUDAContext>( dY.size(), dY.data<float>(), dX->mutable_data<float>()); return true; } else { const float scale = 1. / (1. - ratio_); hipLaunchKernelGGL(( DropoutGradientKernel), dim3(CAFFE_GET_BLOCKS(dY.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), dY.size(), dY.data<float>(), mask.data<bool>(), scale, dX->mutable_data<float>()); return true; } } namespace { REGISTER_CUDA_OPERATOR(Dropout, DropoutOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(DropoutGrad, DropoutGradientOp<float, CUDAContext>); } // namespace } // namespace caffe2
7bd289e10ab718801c71f254570f99c1430f266d.cu
#include "caffe2/operators/dropout_op.h" #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { __global__ void DropoutKernel(const int N, const float ratio, const float* Xdata, float* Ydata, bool* maskdata) { const float scale = 1. / (1. - ratio); CUDA_1D_KERNEL_LOOP(i, N) { maskdata[i] = (Ydata[i] > ratio); Ydata[i] = Xdata[i] * scale * maskdata[i]; } } } // namespace template <> bool DropoutOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); auto* mask = Output(1); Y->Reshape(X.dims()); mask->Reshape(X.dims()); CAFFE_DCHECK_GT(X.size(), 0); if (is_test_) { context_.Copy<float, CUDAContext, CUDAContext>( X.size(), X.data<float>(), Y->mutable_data<float>()); return true; } else { // We do a simple trick here: since curand cannot generate random // boolean numbers, we will generate into dY and write the result to // mask. float* Ydata = Y->mutable_data<float>(); CURAND_CHECK(curandGenerateUniform( context_.curand_generator(), Ydata, X.size())); DropoutKernel<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), ratio_, X.data<float>(), Ydata, mask->mutable_data<bool>()); return true; } } namespace { __global__ void DropoutGradientKernel(const int N, const float* dYdata, const bool* maskdata, const float scale, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { dXdata[i] = dYdata[i] * maskdata[i] * scale; } } } // namespace template <> bool DropoutGradientOp<float, CUDAContext>::RunOnDevice() { auto& dY = Input(0); auto& mask = Input(1); auto* dX = Output(0); CAFFE_DCHECK_GT(dY.size(), 0); CAFFE_DCHECK_EQ(dY.size(), mask.size()); dX->Reshape(dY.dims()); if (is_test_) { context_.Copy<float, CUDAContext, CUDAContext>( dY.size(), dY.data<float>(), dX->mutable_data<float>()); return true; } else { const float scale = 1. / (1. - ratio_); DropoutGradientKernel<<<CAFFE_GET_BLOCKS(dY.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( dY.size(), dY.data<float>(), mask.data<bool>(), scale, dX->mutable_data<float>()); return true; } } namespace { REGISTER_CUDA_OPERATOR(Dropout, DropoutOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(DropoutGrad, DropoutGradientOp<float, CUDAContext>); } // namespace } // namespace caffe2
79945948c15d25f3619d2c40636540b2a5717ed2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void ge_linear_frac (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, const REAL* b, const int offset_b, const int ld_b, const REAL scalea, const REAL shifta, const REAL scaleb, const REAL shiftb, REAL* c, const int offset_c, const int ld_c) { const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x; const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y; const bool valid = (gid_0 < sd) && (gid_1 < fd); if (valid) { c[offset_c + gid_0 + gid_1 * ld_c] = (scalea * a[offset_a + gid_0 + gid_1 * ld_a] + shifta) / (scaleb * b[offset_b + gid_0 + gid_1 * ld_b] + shiftb); } }
79945948c15d25f3619d2c40636540b2a5717ed2.cu
#include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void ge_linear_frac (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, const REAL* b, const int offset_b, const int ld_b, const REAL scalea, const REAL shifta, const REAL scaleb, const REAL shiftb, REAL* c, const int offset_c, const int ld_c) { const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x; const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y; const bool valid = (gid_0 < sd) && (gid_1 < fd); if (valid) { c[offset_c + gid_0 + gid_1 * ld_c] = (scalea * a[offset_a + gid_0 + gid_1 * ld_a] + shifta) / (scaleb * b[offset_b + gid_0 + gid_1 * ld_b] + shiftb); } }
554203998c7dd91b312ab5ccfabeeaf10964218e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> //Code written by Alan Fleming //CONSTANTS #define MATRIXSIZE 131072 #define BLOCKSIZE 1024 //Code to preform sum reduction using the cpu int SumReductionCPU(int* x, int N){ int sum = 0; for(int i = 0; i < N; i++){ sum += x[i]; } return sum; } __global__ void sumReductionKernal(int* input, int* output) { //initialize Partial Result for thread __shared__ int partialResult[2 * BLOCKSIZE]; unsigned int start = 2*blockIdx.x * blockDim.x; partialResult[threadIdx.x] = input[start + threadIdx.x]; partialResult[blockDim.x + threadIdx.x] = input[start +blockDim.x + threadIdx.x]; //Preform sum reduction for(unsigned int stride = blockDim.x; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride){ partialResult[threadIdx.x] += partialResult[threadIdx.x + stride]; } } __syncthreads(); if(threadIdx.x == 0){ //write block sum to global memory output[blockIdx.x] = partialResult[0]; } } int main() { int *a = (int *)malloc(sizeof(int) * MATRIXSIZE); //allocate space for array int *b = (int *)malloc(sizeof(int) * MATRIXSIZE); //allocate space for array //initialize array int init = 1325; for(int i=0; i<MATRIXSIZE;i++){ init = 3125 * init % 6553; a[i] = (init - 1000) % 97; b[i] = 0; } //Test CPU reduction //Get start time clock_t t1 = clock(); //Calculate reduction int cpuResult = SumReductionCPU(a, MATRIXSIZE); //Get stop time clock_t t2 = clock(); //Calculate runtime float cpuTime= (float(t2-t1)/CLOCKS_PER_SEC*1000); //Allocate memory on GPU compution. dev_b is used to store the results of the first pass of reduction int *dev_a, *dev_b; hipMalloc((void **)(&dev_a), MATRIXSIZE *sizeof(int)); hipMalloc((void **)(&dev_b), MATRIXSIZE *sizeof(int)); //copy memory to gpu hipMemcpy(dev_a,a, MATRIXSIZE * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_b,b, MATRIXSIZE * sizeof(int), hipMemcpyHostToDevice); //calculate dimentions for gpu dim3 dimBlock(BLOCKSIZE); dim3 dimGrid(ceil(double(MATRIXSIZE)/dimBlock.x/2)); //Set up cuda events for recording runtime hipEvent_t start,stop; float gpuTime; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); //Calculate GPU Reduction for each block hipLaunchKernelGGL(( sumReductionKernal), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_a, dev_b); //Calculate GPU Recuction for block results hipLaunchKernelGGL(( sumReductionKernal), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_b, dev_a); //calculate runtime hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&gpuTime,start,stop); //destroy cuda events hipEventDestroy(start); hipEventDestroy(stop); //copy sum from gpu hipMemcpy(a, dev_a, sizeof(int), hipMemcpyDeviceToHost); //print speedup printf("CPU Runtime: %f\nGpu Runtime: %f\nSpeedup: %f\n", (double)cpuTime, (double)gpuTime, double(cpuTime / gpuTime)); //print reduction results printf("CPU Result: %d\nGPU Result: %d\n", cpuResult, a[0]); //verify results if(cpuResult == a[0]) { printf("TEST PASSED\n"); } else { printf("TEST FAILED\n"); } //free memory free(a); hipFree(dev_a); hipFree(dev_b); return 0; }
554203998c7dd91b312ab5ccfabeeaf10964218e.cu
#include <stdio.h> #include <math.h> #include <time.h> #include <cuda.h> //Code written by Alan Fleming //CONSTANTS #define MATRIXSIZE 131072 #define BLOCKSIZE 1024 //Code to preform sum reduction using the cpu int SumReductionCPU(int* x, int N){ int sum = 0; for(int i = 0; i < N; i++){ sum += x[i]; } return sum; } __global__ void sumReductionKernal(int* input, int* output) { //initialize Partial Result for thread __shared__ int partialResult[2 * BLOCKSIZE]; unsigned int start = 2*blockIdx.x * blockDim.x; partialResult[threadIdx.x] = input[start + threadIdx.x]; partialResult[blockDim.x + threadIdx.x] = input[start +blockDim.x + threadIdx.x]; //Preform sum reduction for(unsigned int stride = blockDim.x; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride){ partialResult[threadIdx.x] += partialResult[threadIdx.x + stride]; } } __syncthreads(); if(threadIdx.x == 0){ //write block sum to global memory output[blockIdx.x] = partialResult[0]; } } int main() { int *a = (int *)malloc(sizeof(int) * MATRIXSIZE); //allocate space for array int *b = (int *)malloc(sizeof(int) * MATRIXSIZE); //allocate space for array //initialize array int init = 1325; for(int i=0; i<MATRIXSIZE;i++){ init = 3125 * init % 6553; a[i] = (init - 1000) % 97; b[i] = 0; } //Test CPU reduction //Get start time clock_t t1 = clock(); //Calculate reduction int cpuResult = SumReductionCPU(a, MATRIXSIZE); //Get stop time clock_t t2 = clock(); //Calculate runtime float cpuTime= (float(t2-t1)/CLOCKS_PER_SEC*1000); //Allocate memory on GPU compution. dev_b is used to store the results of the first pass of reduction int *dev_a, *dev_b; cudaMalloc((void **)(&dev_a), MATRIXSIZE *sizeof(int)); cudaMalloc((void **)(&dev_b), MATRIXSIZE *sizeof(int)); //copy memory to gpu cudaMemcpy(dev_a,a, MATRIXSIZE * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_b,b, MATRIXSIZE * sizeof(int), cudaMemcpyHostToDevice); //calculate dimentions for gpu dim3 dimBlock(BLOCKSIZE); dim3 dimGrid(ceil(double(MATRIXSIZE)/dimBlock.x/2)); //Set up cuda events for recording runtime cudaEvent_t start,stop; float gpuTime; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); //Calculate GPU Reduction for each block sumReductionKernal<<<dimGrid, dimBlock>>>(dev_a, dev_b); //Calculate GPU Recuction for block results sumReductionKernal<<<dimGrid, dimBlock>>>(dev_b, dev_a); //calculate runtime cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&gpuTime,start,stop); //destroy cuda events cudaEventDestroy(start); cudaEventDestroy(stop); //copy sum from gpu cudaMemcpy(a, dev_a, sizeof(int), cudaMemcpyDeviceToHost); //print speedup printf("CPU Runtime: %f\nGpu Runtime: %f\nSpeedup: %f\n", (double)cpuTime, (double)gpuTime, double(cpuTime / gpuTime)); //print reduction results printf("CPU Result: %d\nGPU Result: %d\n", cpuResult, a[0]); //verify results if(cpuResult == a[0]) { printf("TEST PASSED\n"); } else { printf("TEST FAILED\n"); } //free memory free(a); cudaFree(dev_a); cudaFree(dev_b); return 0; }
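// The second launch above works only because dev_b was pre-zeroed on the host: after pass one,
// just the first dimGrid.x entries of dev_b hold partial sums, and pass two (launched with the
// same 64-block grid) quietly adds the remaining zeros before thread 0 of block 0 writes the
// final total into dev_a[0]. A tighter second pass runs a single block sized to the number of
// partial sums. This sketch assumes num_partials is a power of two no larger than 2*BLOCKSIZE
// (it is 64 with the constants above).
void finish_reduction(int* d_partial_sums, int* d_total, int num_partials) {
  // one block, each thread loads two partial sums, the result lands in d_total[0]
  sumReductionKernal<<<1, num_partials / 2>>>(d_partial_sums, d_total);
}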
2c78392bdbba54940a1ba10d28b05577ef04fe54.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define M 512 #define P 256 #define N 128 #define K 64 // My own implementation of the Matrix Multiplication __global__ void globalMatrixMultiplication1D(double A[M][P], double B[P][N], double C[M][N]) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int i = (int)(idx / N); int j = idx % N; for (int k = 0; k < P; k++) C[i][j] += A[i][k] * B[k][j]; } void verifyMatrixMultiplication(double C[M][N], double check) { int maxError = 0; for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) { maxError += (int)abs(C[i][j] - check); } } printf("Maximum Error = %d\n", maxError); } float matrixMultiplication1DHost(bool verbose) { double A[M][P], B[P][N], C[M][N]; for (int i = 0; i < M; i++) { for (int j = 0; j < P; j++) A[i][j] = 1.0; } for (int i = 0; i < P; i++) { for (int j = 0; j < N; j++) B[i][j] = 2.0; } for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) C[i][j] = 0.0; } double (*dA)[P], (*dB)[N], (*dC)[N]; hipMalloc((void**)&dA, sizeof(double) * M * P); hipMalloc((void**)&dB, sizeof(double) * P * N); hipMalloc((void**)&dC, sizeof(double) * M * N); hipMemcpy(dA, A, sizeof(double) * M * P, hipMemcpyHostToDevice); hipMemcpy(dB, B, sizeof(double) * P * N, hipMemcpyHostToDevice); hipMemcpy(dC, C, sizeof(double) * M * N, hipMemcpyHostToDevice); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( globalMatrixMultiplication1D), dim3(M*N/K), dim3(K), 0, 0, dA, dB, dC); hipEventRecord(stop); hipEventSynchronize(stop); hipMemcpy(C, dC, sizeof(double) * M * N, hipMemcpyDeviceToHost); hipFree(dA); hipFree(dB); hipFree(dC); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); if (verbose) { printf("Elapsed Time = %f milliseconds\n", elapsedTime); verifyMatrixMultiplication(C, P * 1.0 * 2.0); } hipEventDestroy(start); hipEventDestroy(stop); return elapsedTime; } int main() { int count = 100; float averageTime = 0; for (int i = 0; i < count; i++) averageTime += matrixMultiplication1DHost(false); averageTime /= count; printf("[GPU - Double] (Matrix Multiplication 1D - Global) Average Elapsed Time = %f ms\n", averageTime); return 0; }
2c78392bdbba54940a1ba10d28b05577ef04fe54.cu
#include <stdio.h> #define M 512 #define P 256 #define N 128 #define K 64 // My own implementation of the Matrix Multiplication __global__ void globalMatrixMultiplication1D(double A[M][P], double B[P][N], double C[M][N]) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int i = (int)(idx / N); int j = idx % N; for (int k = 0; k < P; k++) C[i][j] += A[i][k] * B[k][j]; } void verifyMatrixMultiplication(double C[M][N], double check) { int maxError = 0; for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) { maxError += (int)abs(C[i][j] - check); } } printf("Maximum Error = %d\n", maxError); } float matrixMultiplication1DHost(bool verbose) { double A[M][P], B[P][N], C[M][N]; for (int i = 0; i < M; i++) { for (int j = 0; j < P; j++) A[i][j] = 1.0; } for (int i = 0; i < P; i++) { for (int j = 0; j < N; j++) B[i][j] = 2.0; } for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) C[i][j] = 0.0; } double (*dA)[P], (*dB)[N], (*dC)[N]; cudaMalloc((void**)&dA, sizeof(double) * M * P); cudaMalloc((void**)&dB, sizeof(double) * P * N); cudaMalloc((void**)&dC, sizeof(double) * M * N); cudaMemcpy(dA, A, sizeof(double) * M * P, cudaMemcpyHostToDevice); cudaMemcpy(dB, B, sizeof(double) * P * N, cudaMemcpyHostToDevice); cudaMemcpy(dC, C, sizeof(double) * M * N, cudaMemcpyHostToDevice); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); globalMatrixMultiplication1D<<<M*N/K, K>>>(dA, dB, dC); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaMemcpy(C, dC, sizeof(double) * M * N, cudaMemcpyDeviceToHost); cudaFree(dA); cudaFree(dB); cudaFree(dC); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); if (verbose) { printf("Elapsed Time = %f milliseconds\n", elapsedTime); verifyMatrixMultiplication(C, P * 1.0 * 2.0); } cudaEventDestroy(start); cudaEventDestroy(stop); return elapsedTime; } int main() { int count = 100; float averageTime = 0; for (int i = 0; i < count; i++) averageTime += matrixMultiplication1DHost(false); averageTime /= count; printf("[GPU - Double] (Matrix Multiplication 1D - Global) Average Elapsed Time = %f ms\n", averageTime); return 0; }
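// The kernel above re-reads each element of A and B from global memory P times. A standard
// refinement stages TILE x TILE blocks of A and B through shared memory; the sketch below is an
// alternative illustration, not this file's implementation, and it assumes M, P and N are all
// multiples of TILE (true for the 512/256/128 constants above), so no boundary checks are needed.
#define TILE 16
__global__ void tiledMatrixMultiplication(double A[M][P], double B[P][N], double C[M][N]) {
  __shared__ double As[TILE][TILE];
  __shared__ double Bs[TILE][TILE];
  int row = blockIdx.y * TILE + threadIdx.y;   // index into M
  int col = blockIdx.x * TILE + threadIdx.x;   // index into N
  double acc = 0.0;
  for (int t = 0; t < P / TILE; ++t) {
    As[threadIdx.y][threadIdx.x] = A[row][t * TILE + threadIdx.x];
    Bs[threadIdx.y][threadIdx.x] = B[t * TILE + threadIdx.y][col];
    __syncthreads();
    for (int k = 0; k < TILE; ++k)
      acc += As[threadIdx.y][k] * Bs[k][threadIdx.x];
    __syncthreads();
  }
  C[row][col] = acc;                           // overwrites C, so pre-zeroing is not required
}
// possible launch: dim3 block(TILE, TILE); dim3 grid(N / TILE, M / TILE);
//                  tiledMatrixMultiplication<<<grid, block>>>(dA, dB, dC);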
6bab3e9f08c1e97f0f32558e13f74faa72127e9d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <time.h> #include <assert.h> #include "sdl_wrapper/sdl.h" #include "geometry/nodeList.cuh" #include "utils/cudaList.cuh" #include "geometry/geometry.cuh" #include "color.cuh" #include "camera.cuh" #include "utils/utils.cuh" using CUDA_RAY::Camera; using CUDA_RAY::Color; using CUDA_RAY::Node; using CUDA_RAY::NodeList; using CUDA_RAY::Ray; using CUDA_RAY::Light; using CUDA_RAY::Vector; using CUDA_RAY::CudaList; using CUDA_RAY::Lambert; using CUDA_RAY::CheckerTexture; const int LIGHTS_COUNT = 2; constexpr const int NODES_COUNT = 2; #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) void check_cuda(hipError_t result, char const* const func, const char* const file, int const line) { if (result) { std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; // Make sure we call CUDA Device Reset before exiting hipDeviceReset(); exit(99); } } __device__ Color raytrace(const CudaList<Node>* list, const Ray& ray, const CudaList<Light>* lights) { Node* closestNode = nullptr; CUDA_RAY::IntersectInfo closestInfo; float closestDist = INF; //printf(" %i \n", list->getSize()); //printf(" lapdplsadl \n"); for (int i = 0; i < list->getSize(); ++i) { CUDA_RAY::IntersectInfo info; Node* node = (*list)[i]; if (!node->geom_->intersect(ray, info)) continue; if (info.distance_ < closestDist) { closestDist = info.distance_; closestNode = node; closestInfo = info; } } // check if we hit the sky: if (closestNode == nullptr) return Color(0, 0, 0); // TODO: return background color else return closestNode->shader_->shade(ray, closestInfo, lights, list); } __global__ void render(Camera** cam, CudaList<Node>* list, CudaList<Light>* lights, Color* vfb, int x, int y) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; // printf("threadIdx.x %i, blockIdx.x %i, blockDim.x %i \n", threadIdx.x, blockIdx.x, blockDim.x); if ((i >= x) || (j >= y)) { printf("OUTSIDE!"); printf("i = %d, j = %d", i, j); return; } CUDA_RAY::Ray ray = (*cam)->getScreenRay(j, i); int index = i * x + j; vfb[index] = raytrace(list, ray, lights); } __global__ void setupCamera(Camera** cam) { *cam = new CUDA_RAY::Camera; (*cam)->position_ = CUDA_RAY::Vector(35, 60, 20); (*cam)->yaw_ = 0; (*cam)->pitch_ = 0; (*cam)->roll_ = 0; (*cam)->fov_ = 90; (*cam)->aspectRatio_ = float(RESX) / float(RESY); (*cam)->frameBegin(); } __global__ void setupNodes(CudaList<Node>* list) { if (threadIdx.x == 0 && blockIdx.x == 0) { list->list_ = new Node[NODES_COUNT]; list->cap_ = NODES_COUNT; Node a; a.geom_ = new CUDA_RAY::Plane(1.f); a.shader_ = new CUDA_RAY::Lambert(new CheckerTexture(Color(1.f, 0.f, 0.f), Color(0.f, 1.f, 0.f))); list->addElem(a); Node b; b.geom_ = new CUDA_RAY::Sphere(Vector(20, 50, 60), 10); b.shader_ = new CUDA_RAY::Lambert(new CheckerTexture(Color(0.5f, 0.f, 0.f), Color(0.1f, 0.7f, 0.3f), 5.f)); list->addElem(b); /*list->list_[0].geom_ = new CUDA_RAY::Plane(1.f); list->list_[0].shader_ = new CUDA_RAY::CheckerShader(Color(1.f, 0.f, 0.f), Color(0.f, 1.f, 0.f)); list->size_ = NODES_COUNT;*/ } } __global__ void setupLights(CudaList<Light>* lights) { if (threadIdx.x == 0 && blockIdx.x == 0) { lights->list_ = new Light[LIGHTS_COUNT]; lights->cap_ = LIGHTS_COUNT; Light a; a.pos_ = Vector(0, 100, 10); a.intensity_ = 11000.f; Light b; b.pos_ = Vector(40, 100, 10); b.intensity_ = 11000.f; lights->addElem(b); 
lights->addElem(a); /*a.pos_ = Vector(35, 5, 200); lights->addElem(a);*/ /*a.pos_ = Vector(35, 5, 300); lights->addElem(a);*/ } } __global__ void freeWorld(Camera** cam, CudaList<Node>* list, CudaList<Light>* lights) { if (threadIdx.x == 0 && blockIdx.x == 0) { delete *cam; /*delete lights[0]; delete lights[1]; delete[] lights;*/ delete list->list_[0].shader_; delete list->list_[0].geom_; delete list->list_[1].shader_; delete list->list_[1].geom_; // delete[] list->list_; } } int main(int argc, char* args[]) { CXX::SdlObject& sdl = CXX::SdlObject::instance(); int num_pixels = sdl.frameWidth() * sdl.frameHeight(); size_t vfb_size = num_pixels * sizeof(CUDA_RAY::Color); CUDA_RAY::Color* vfb = nullptr; checkCudaErrors(hipMallocManaged((void**)&vfb, vfb_size)); // Camera setup Camera** camera; checkCudaErrors(hipMalloc((void**)&camera, sizeof(CUDA_RAY::Camera*))); hipLaunchKernelGGL(( setupCamera), dim3(1), dim3(1), 0, 0, camera); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipGetLastError()); // Geometries and shaders setup CudaList<Node>* objects; checkCudaErrors(hipMalloc(&objects, sizeof(CudaList<Node>))); hipLaunchKernelGGL(( setupNodes), dim3(1), dim3(1), 0, 0, objects); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipGetLastError()); // Lightning setup CudaList<Light>* lights; checkCudaErrors(hipMalloc(&lights, sizeof(CudaList<Light>))); hipLaunchKernelGGL(( setupLights), dim3(1), dim3(1), 0, 0, lights); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipGetLastError()); int tx = 8; int ty = 8; dim3 blocks((sdl.frameHeight() / tx), (sdl.frameWidth() / ty)); dim3 threads(tx, ty); // Render our buffer clock_t start, stop; start = clock(); hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, camera, objects, lights, vfb, sdl.frameWidth(), sdl.frameWidth()); // Wait for GPU to finish before accessing on host checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipGetLastError()); stop = clock(); double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; std::cout << "took " << timer_seconds << " seconds.\n"; sdl.displayVFB<CUDA_RAY::Color>(vfb); sdl.waitForUserExit(); hipLaunchKernelGGL(( freeWorld), dim3(1), dim3(1), 0, 0, camera, objects, lights); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipFree(vfb)); checkCudaErrors(hipFree(objects)); checkCudaErrors(hipFree(camera)); checkCudaErrors(hipFree(lights)); hipDeviceReset(); return 0; }
6bab3e9f08c1e97f0f32558e13f74faa72127e9d.cu
#include <iostream> #include <time.h> #include <assert.h> #include "sdl_wrapper/sdl.h" #include "geometry/nodeList.cuh" #include "utils/cudaList.cuh" #include "geometry/geometry.cuh" #include "color.cuh" #include "camera.cuh" #include "utils/utils.cuh" using CUDA_RAY::Camera; using CUDA_RAY::Color; using CUDA_RAY::Node; using CUDA_RAY::NodeList; using CUDA_RAY::Ray; using CUDA_RAY::Light; using CUDA_RAY::Vector; using CUDA_RAY::CudaList; using CUDA_RAY::Lambert; using CUDA_RAY::CheckerTexture; const int LIGHTS_COUNT = 2; constexpr const int NODES_COUNT = 2; #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) void check_cuda(cudaError_t result, char const* const func, const char* const file, int const line) { if (result) { std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; // Make sure we call CUDA Device Reset before exiting cudaDeviceReset(); exit(99); } } __device__ Color raytrace(const CudaList<Node>* list, const Ray& ray, const CudaList<Light>* lights) { Node* closestNode = nullptr; CUDA_RAY::IntersectInfo closestInfo; float closestDist = INF; //printf(" %i \n", list->getSize()); //printf(" lapdplsadl \n"); for (int i = 0; i < list->getSize(); ++i) { CUDA_RAY::IntersectInfo info; Node* node = (*list)[i]; if (!node->geom_->intersect(ray, info)) continue; if (info.distance_ < closestDist) { closestDist = info.distance_; closestNode = node; closestInfo = info; } } // check if we hit the sky: if (closestNode == nullptr) return Color(0, 0, 0); // TODO: return background color else return closestNode->shader_->shade(ray, closestInfo, lights, list); } __global__ void render(Camera** cam, CudaList<Node>* list, CudaList<Light>* lights, Color* vfb, int x, int y) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; // printf("threadIdx.x %i, blockIdx.x %i, blockDim.x %i \n", threadIdx.x, blockIdx.x, blockDim.x); if ((i >= x) || (j >= y)) { printf("OUTSIDE!"); printf("i = %d, j = %d", i, j); return; } CUDA_RAY::Ray ray = (*cam)->getScreenRay(j, i); int index = i * x + j; vfb[index] = raytrace(list, ray, lights); } __global__ void setupCamera(Camera** cam) { *cam = new CUDA_RAY::Camera; (*cam)->position_ = CUDA_RAY::Vector(35, 60, 20); (*cam)->yaw_ = 0; (*cam)->pitch_ = 0; (*cam)->roll_ = 0; (*cam)->fov_ = 90; (*cam)->aspectRatio_ = float(RESX) / float(RESY); (*cam)->frameBegin(); } __global__ void setupNodes(CudaList<Node>* list) { if (threadIdx.x == 0 && blockIdx.x == 0) { list->list_ = new Node[NODES_COUNT]; list->cap_ = NODES_COUNT; Node a; a.geom_ = new CUDA_RAY::Plane(1.f); a.shader_ = new CUDA_RAY::Lambert(new CheckerTexture(Color(1.f, 0.f, 0.f), Color(0.f, 1.f, 0.f))); list->addElem(a); Node b; b.geom_ = new CUDA_RAY::Sphere(Vector(20, 50, 60), 10); b.shader_ = new CUDA_RAY::Lambert(new CheckerTexture(Color(0.5f, 0.f, 0.f), Color(0.1f, 0.7f, 0.3f), 5.f)); list->addElem(b); /*list->list_[0].geom_ = new CUDA_RAY::Plane(1.f); list->list_[0].shader_ = new CUDA_RAY::CheckerShader(Color(1.f, 0.f, 0.f), Color(0.f, 1.f, 0.f)); list->size_ = NODES_COUNT;*/ } } __global__ void setupLights(CudaList<Light>* lights) { if (threadIdx.x == 0 && blockIdx.x == 0) { lights->list_ = new Light[LIGHTS_COUNT]; lights->cap_ = LIGHTS_COUNT; Light a; a.pos_ = Vector(0, 100, 10); a.intensity_ = 11000.f; Light b; b.pos_ = Vector(40, 100, 10); b.intensity_ = 11000.f; lights->addElem(b); lights->addElem(a); /*a.pos_ = Vector(35, 5, 200); lights->addElem(a);*/ /*a.pos_ = Vector(35, 
5, 300); lights->addElem(a);*/ } } __global__ void freeWorld(Camera** cam, CudaList<Node>* list, CudaList<Light>* lights) { if (threadIdx.x == 0 && blockIdx.x == 0) { delete *cam; /*delete lights[0]; delete lights[1]; delete[] lights;*/ delete list->list_[0].shader_; delete list->list_[0].geom_; delete list->list_[1].shader_; delete list->list_[1].geom_; // delete[] list->list_; } } int main(int argc, char* args[]) { CXX::SdlObject& sdl = CXX::SdlObject::instance(); int num_pixels = sdl.frameWidth() * sdl.frameHeight(); size_t vfb_size = num_pixels * sizeof(CUDA_RAY::Color); CUDA_RAY::Color* vfb = nullptr; checkCudaErrors(cudaMallocManaged((void**)&vfb, vfb_size)); // Camera setup Camera** camera; checkCudaErrors(cudaMalloc((void**)&camera, sizeof(CUDA_RAY::Camera*))); setupCamera<<<1, 1>>>(camera); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError()); // Geometries and shaders setup CudaList<Node>* objects; checkCudaErrors(cudaMalloc(&objects, sizeof(CudaList<Node>))); setupNodes<<<1, 1>>> (objects); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError()); // Lightning setup CudaList<Light>* lights; checkCudaErrors(cudaMalloc(&lights, sizeof(CudaList<Light>))); setupLights<<<1, 1>>>(lights); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError()); int tx = 8; int ty = 8; dim3 blocks((sdl.frameHeight() / tx), (sdl.frameWidth() / ty)); dim3 threads(tx, ty); // Render our buffer clock_t start, stop; start = clock(); render<<<blocks, threads>>>(camera, objects, lights, vfb, sdl.frameWidth(), sdl.frameWidth()); // Wait for GPU to finish before accessing on host checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError()); stop = clock(); double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; std::cout << "took " << timer_seconds << " seconds.\n"; sdl.displayVFB<CUDA_RAY::Color>(vfb); sdl.waitForUserExit(); freeWorld<<<1, 1>>>(camera, objects, lights); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaFree(vfb)); checkCudaErrors(cudaFree(objects)); checkCudaErrors(cudaFree(camera)); checkCudaErrors(cudaFree(lights)); cudaDeviceReset(); return 0; }
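// Two things worth double-checking in the launch above: the grid uses plain integer division, so
// frame sizes that are not multiples of tx/ty would leave a strip of pixels unrendered, and
// render() receives sdl.frameWidth() for both of its last two arguments, which weakens the
// (i >= x) || (j >= y) guard for non-square frames. A rounded-up grid is a one-line change
// (ceil_div is a hypothetical helper, the index mapping of the kernel is kept: x blocks cover
// rows, y blocks cover columns):
inline unsigned int ceil_div(unsigned int a, unsigned int b) { return (a + b - 1) / b; }
//   dim3 threads(tx, ty);
//   dim3 blocks(ceil_div(sdl.frameHeight(), tx), ceil_div(sdl.frameWidth(), ty));
//   render<<<blocks, threads>>>(camera, objects, lights, vfb,
//                               sdl.frameWidth(), sdl.frameHeight());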
b738e6bcf6cd1bc707508613c0bd9ba74c916246.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void convertKinectDisparityToRegularDisparity_kernel( float *d_regularDisparity, int d_regularDisparityPitch, const float *d_KinectDisparity, int d_KinectDisparityPitch, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < width) & (y < height)) { // are we in the image? float d_in = *((float *)((char *)d_KinectDisparity + y * d_KinectDisparityPitch) + x); float d_out = (d_in == 0.0f) ? nanf("") : -d_in; *((float *)((char *)d_regularDisparity + y *d_regularDisparityPitch) + x) = d_out; } }
b738e6bcf6cd1bc707508613c0bd9ba74c916246.cu
#include "includes.h" __global__ void convertKinectDisparityToRegularDisparity_kernel( float *d_regularDisparity, int d_regularDisparityPitch, const float *d_KinectDisparity, int d_KinectDisparityPitch, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < width) & (y < height)) { // are we in the image? float d_in = *((float *)((char *)d_KinectDisparity + y * d_KinectDisparityPitch) + x); float d_out = (d_in == 0.0f) ? nanf("") : -d_in; *((float *)((char *)d_regularDisparity + y *d_regularDisparityPitch) + x) = d_out; } }
016cd381c74848cd0a1786f3f1522e5bfc364f97.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This example introduces __device__ functions, which are special functions // which may be called from code executing on the device. #include <stdlib.h> #include <stdio.h> // __device__ functions may only be called from __global__ functions or other // __device__ functions. Unlike __global__ functions, __device__ functions are // not configured, and have no restriction on return type. __device__ int get_constant(void) { // just return 7 return 7; } __device__ int get_block_index(void) { // return the index of the current thread's block return blockIdx.x; } __device__ int get_thread_index(void) { // return the index of the current thread within its block return threadIdx.x; } __device__ int get_global_index(void) { // return the index of the current thread across the entire grid launch return blockIdx.x * blockDim.x + threadIdx.x; } // kernel1 returns the result of calling the __device__ function return_constant(): __global__ void kernel1(int *array) { int index = get_global_index(); array[index] = get_constant(); } // kernel2 returns the result of calling the __device__ function return_block_index(): __global__ void kernel2(int *array) { int index = get_global_index(); array[index] = get_block_index(); } // kernel3 returns the result of calling the __device__ function return_thread_index(): __global__ void kernel3(int *array) { int index = get_global_index(); array[index] = get_thread_index(); } // kernel4 returns the result of calling the __device__ function return_thread_index(): __global__ void kernel4(int *array) { int index = get_global_index(); array[index] = get_global_index(); } int main(void) { int num_elements = 256; int num_bytes = num_elements * sizeof(int); int *device_array = 0; int *host_array = 0; // malloc a host array host_array = (int*)malloc(num_bytes); // hipMalloc a device array hipMalloc((void**)&device_array, num_bytes); // if either memory allocation failed, report an error message if(host_array == 0 || device_array == 0) { printf("couldn't allocate memory\n"); return 1; } // choose a launch configuration int block_size = 128; int grid_size = num_elements / block_size; // launch each kernel and print out the results hipLaunchKernelGGL(( kernel1), dim3(grid_size),dim3(block_size), 0, 0, device_array); hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost); printf("kernel1 results:\n"); for(int i=0; i < num_elements; ++i) { printf("%d ", host_array[i]); } printf("\n\n"); hipLaunchKernelGGL(( kernel2), dim3(grid_size),dim3(block_size), 0, 0, device_array); hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost); printf("kernel2 results:\n"); for(int i=0; i < num_elements; ++i) { printf("%d ", host_array[i]); } printf("\n\n"); hipLaunchKernelGGL(( kernel3), dim3(grid_size),dim3(block_size), 0, 0, device_array); hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost); printf("kernel3 results:\n"); for(int i=0; i < num_elements; ++i) { printf("%d ", host_array[i]); } printf("\n\n"); hipLaunchKernelGGL(( kernel4), dim3(grid_size),dim3(block_size), 0, 0, device_array); hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost); printf("kernel4 results:\n"); for(int i=0; i < num_elements; ++i) { printf("%d ", host_array[i]); } printf("\n\n"); // deallocate memory free(host_array); hipFree(device_array); }
016cd381c74848cd0a1786f3f1522e5bfc364f97.cu
// This example introduces __device__ functions, which are special functions // which may be called from code executing on the device. #include <stdlib.h> #include <stdio.h> // __device__ functions may only be called from __global__ functions or other // __device__ functions. Unlike __global__ functions, __device__ functions are // not configured, and have no restriction on return type. __device__ int get_constant(void) { // just return 7 return 7; } __device__ int get_block_index(void) { // return the index of the current thread's block return blockIdx.x; } __device__ int get_thread_index(void) { // return the index of the current thread within its block return threadIdx.x; } __device__ int get_global_index(void) { // return the index of the current thread across the entire grid launch return blockIdx.x * blockDim.x + threadIdx.x; } // kernel1 returns the result of calling the __device__ function return_constant(): __global__ void kernel1(int *array) { int index = get_global_index(); array[index] = get_constant(); } // kernel2 returns the result of calling the __device__ function return_block_index(): __global__ void kernel2(int *array) { int index = get_global_index(); array[index] = get_block_index(); } // kernel3 returns the result of calling the __device__ function return_thread_index(): __global__ void kernel3(int *array) { int index = get_global_index(); array[index] = get_thread_index(); } // kernel4 returns the result of calling the __device__ function return_thread_index(): __global__ void kernel4(int *array) { int index = get_global_index(); array[index] = get_global_index(); } int main(void) { int num_elements = 256; int num_bytes = num_elements * sizeof(int); int *device_array = 0; int *host_array = 0; // malloc a host array host_array = (int*)malloc(num_bytes); // cudaMalloc a device array cudaMalloc((void**)&device_array, num_bytes); // if either memory allocation failed, report an error message if(host_array == 0 || device_array == 0) { printf("couldn't allocate memory\n"); return 1; } // choose a launch configuration int block_size = 128; int grid_size = num_elements / block_size; // launch each kernel and print out the results kernel1<<<grid_size,block_size>>>(device_array); cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost); printf("kernel1 results:\n"); for(int i=0; i < num_elements; ++i) { printf("%d ", host_array[i]); } printf("\n\n"); kernel2<<<grid_size,block_size>>>(device_array); cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost); printf("kernel2 results:\n"); for(int i=0; i < num_elements; ++i) { printf("%d ", host_array[i]); } printf("\n\n"); kernel3<<<grid_size,block_size>>>(device_array); cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost); printf("kernel3 results:\n"); for(int i=0; i < num_elements; ++i) { printf("%d ", host_array[i]); } printf("\n\n"); kernel4<<<grid_size,block_size>>>(device_array); cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost); printf("kernel4 results:\n"); for(int i=0; i < num_elements; ++i) { printf("%d ", host_array[i]); } printf("\n\n"); // deallocate memory free(host_array); cudaFree(device_array); }
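// Two side notes on the example above: the comments refer to return_constant(),
// return_block_index() and return_thread_index(), while the __device__ functions are actually
// named get_constant(), get_block_index() and get_thread_index(); and the launches and copies
// are never checked for errors. A small hypothetical helper makes silent failures visible:
#define CHECK_CUDA(call)                                                     \
  do {                                                                       \
    cudaError_t err__ = (call);                                              \
    if (err__ != cudaSuccess) {                                              \
      printf("CUDA error %s at %s:%d\n", cudaGetErrorString(err__),          \
             __FILE__, __LINE__);                                            \
      exit(1);                                                               \
    }                                                                        \
  } while (0)
// usage after each launch:
//   kernel1<<<grid_size, block_size>>>(device_array);
//   CHECK_CUDA(cudaGetLastError());   // reports launch-configuration errors
//   CHECK_CUDA(cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost));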
15ed4e06e707f5bf5358bed2d892bbc760d4f2a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Plane.h" #include "../Utils/cuda_math.h" #include "../Utils/cuda_double_math.h" #include "../Materials.h" /* Plane */ __device__ Plane::Plane() {} __device__ void Plane::init(float3 p, float3 n, Material* material_ptr) { point = p; normal = normalize(n); SetMaterial(material_ptr); } __device__ bool Plane::Intersection(const Ray& ray, HitRec& hr) { __shared__ double3 p; if (threadIdx.x == 0) p = make_double3(point); __shared__ double3 n; if (threadIdx.x == 0) n = make_double3(normal); __syncthreads(); double3 ro = make_double3(ray.origin); double3 dir = make_double3(ray.direction); double t = dot(p - ro, n) / dot(dir, n); if (t > eps && (t < hr.tmin || !hr.isHit)) { hr.isHit = true; hr.tmin = t; hr.hit_point = ray.origin + t * ray.direction; hr.hit_normal = normalize(normal); hr.ray = ray; hr.material_ptr = material_ptr; return true; } return false; } __device__ Plane::~Plane() {}
15ed4e06e707f5bf5358bed2d892bbc760d4f2a3.cu
#include "Plane.h" #include "../Utils/cuda_math.h" #include "../Utils/cuda_double_math.h" #include "../Materials.h" /* Plane */ __device__ Plane::Plane() {} __device__ void Plane::init(float3 p, float3 n, Material* material_ptr) { point = p; normal = normalize(n); SetMaterial(material_ptr); } __device__ bool Plane::Intersection(const Ray& ray, HitRec& hr) { __shared__ double3 p; if (threadIdx.x == 0) p = make_double3(point); __shared__ double3 n; if (threadIdx.x == 0) n = make_double3(normal); __syncthreads(); double3 ro = make_double3(ray.origin); double3 dir = make_double3(ray.direction); double t = dot(p - ro, n) / dot(dir, n); if (t > eps && (t < hr.tmin || !hr.isHit)) { hr.isHit = true; hr.tmin = t; hr.hit_point = ray.origin + t * ray.direction; hr.hit_normal = normalize(normal); hr.ray = ray; hr.material_ptr = material_ptr; return true; } return false; } __device__ Plane::~Plane() {}
3b5c587edb02d59ea8a4363bb6fc369f1de1927a.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <iostream> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include "util.hpp" void benchmark_gpu(thrust::host_vector<double> values_host) { // fill a vector with random values size_t n = values_host.size(); thrust::device_vector<double> values_device(n); auto start = get_time(); values_device = values_host; auto h2d_time = get_time() - start; thrust::sort(thrust::device, values_device.begin(), values_device.end()); auto sort_time = get_time() - h2d_time; // TODO: copy result back to host values_host = values_device; auto time_taken = get_time() - start; std::cout << "gpu performance including transfers: " << n / time_taken / 1e6 << " million keys/s\n"; std::cout << "gpu performance without transfers: " << n / sort_time / 1e6 << " million keys/s\n"; // check for errors bool pass = std::is_sorted(values_host.begin(), values_host.end()); std::cout << "gpu sort: " << (pass ? "passed\n\n" : "failed\n\n"); } void benchmark_host(thrust::host_vector<double> values_host) { size_t n = values_host.size(); auto start = get_time(); // sort values on host std::sort(values_host.begin(), values_host.end()); auto time_taken = get_time(); std::cout << "host performance: " << n / time_taken / 1e6 << " million keys/s\n"; // check for errors bool pass = std::is_sorted(values_host.begin(), values_host.end()); std::cout << "host sort: " << (pass ? "passed\n\n" : "failed\n\n"); } int main(int argc, char** argv) { size_t pow = read_arg(argc, argv, 1, 16); size_t n = 1 << pow; auto size_in_bytes = n * sizeof(double); std::cout << "sort test of length n = " << n << " : " << size_in_bytes/(1024.*1024.) << "MB" << std::endl << std::endl; // fill a vector with random values thrust::host_vector<double> values_host(n); std::generate(values_host.begin(), values_host.end(), drand48); // start the nvprof profiling hipProfilerStart(); benchmark_gpu(values_host); benchmark_host(values_host); // stop the profiling session hipProfilerStop(); return 0; }
3b5c587edb02d59ea8a4363bb6fc369f1de1927a.cu
#include <algorithm> #include <iostream> #include <cuda.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include "util.hpp" void benchmark_gpu(thrust::host_vector<double> values_host) { // fill a vector with random values size_t n = values_host.size(); thrust::device_vector<double> values_device(n); auto start = get_time(); values_device = values_host; auto h2d_time = get_time() - start; thrust::sort(thrust::device, values_device.begin(), values_device.end()); auto sort_time = get_time() - h2d_time; // TODO: copy result back to host values_host = values_device; auto time_taken = get_time() - start; std::cout << "gpu performance including transfers: " << n / time_taken / 1e6 << " million keys/s\n"; std::cout << "gpu performance without transfers: " << n / sort_time / 1e6 << " million keys/s\n"; // check for errors bool pass = std::is_sorted(values_host.begin(), values_host.end()); std::cout << "gpu sort: " << (pass ? "passed\n\n" : "failed\n\n"); } void benchmark_host(thrust::host_vector<double> values_host) { size_t n = values_host.size(); auto start = get_time(); // sort values on host std::sort(values_host.begin(), values_host.end()); auto time_taken = get_time(); std::cout << "host performance: " << n / time_taken / 1e6 << " million keys/s\n"; // check for errors bool pass = std::is_sorted(values_host.begin(), values_host.end()); std::cout << "host sort: " << (pass ? "passed\n\n" : "failed\n\n"); } int main(int argc, char** argv) { size_t pow = read_arg(argc, argv, 1, 16); size_t n = 1 << pow; auto size_in_bytes = n * sizeof(double); std::cout << "sort test of length n = " << n << " : " << size_in_bytes/(1024.*1024.) << "MB" << std::endl << std::endl; // fill a vector with random values thrust::host_vector<double> values_host(n); std::generate(values_host.begin(), values_host.end(), drand48); // start the nvprof profiling cudaProfilerStart(); benchmark_gpu(values_host); benchmark_host(values_host); // stop the profiling session cudaProfilerStop(); return 0; }
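// Note on the sort-benchmark pair above: the Thrust calls (thrust::device_vector, thrust::sort
// with the thrust::device policy) pass through hipify untouched, since ROCm provides a Thrust
// implementation under the same headers and namespace; only <cuda.h> becomes <hip/hip_runtime.h>
// and cudaProfilerStart/Stop become hipProfilerStart/Stop. A minimal sketch of the portable
// Thrust usage, assuming hipcc with the ROCm Thrust headers available (illustrative only, not
// part of the dataset):
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <cstdio>

int main()
{
    thrust::device_vector<int> v(4);
    v[0] = 3; v[1] = 1; v[2] = 4; v[3] = 1;
    thrust::sort(thrust::device, v.begin(), v.end());   // identical call on CUDA and ROCm
    thrust::host_vector<int> h = v;                      // device-to-host copy via assignment
    printf("%d %d %d %d\n", (int)h[0], (int)h[1], (int)h[2], (int)h[3]);
    return 0;
}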
278090c4f11c676aff731ad696751858aa54029d.hip
// !!! This is a file automatically generated by hipify!!! #include "aes_ecb_byte.h" #include "common.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <stdint.h> #include <iostream> #include <iomanip> #include <fstream> #include <string> void checkCUDAErrorFn(const char *msg, const char *file, int line) { hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } namespace aes { namespace Common { // The round constant word array, Rcon[i], contains the values given by // x to the power (i-1) being powers of x (x is denoted as {02}) in the field GF(2^8) static const uint8_t Rcon[11] = { 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 }; // S table static const uint8_t sbox1[256] = { 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 }; void aes::Common::write_to_file(uint8_t* data, uint32_t length) { std::ofstream outFile("rainier1.bin"); if ( outFile.is_open() ) { for (int i = 0; i < length; i++) { outFile << data[i]; } } outFile.close(); } // create our AES instance aes_info* aes::Common::create_aes_struct(std::string File,int type) { std::ifstream inFile; int key_bytes; int rounds; int expand_size; if (type == _AES128_) { key_bytes = 16; rounds = 10; expand_size = 176; } else if(type == _AES192_) { key_bytes = 24; rounds = 12; expand_size = 208; } else if(type == _AES256_) { key_bytes = 32; rounds = 14; expand_size = 240; } else { printf("invalid aes \n"); exit(1); // terminate with error } inFile.open(File); if (!inFile) { std::cout << "Unable to open file" << File << std::endl; exit(1); // terminate with error } // get file and padding length inFile.seekg(0, std::ios::end); uint32_t length = inFile.tellg(); // get length uint32_t padded_length = ((length % 16) == 0) ? 
length : length + (16 - (length % 16)); //std::cout << length << " : " << padded_length << std::endl; inFile.seekg(0, std::ios::beg); // go back // make buffers and such IV* iv = new IV; uint8_t* buffer = new uint8_t[padded_length]; aes_info* aes = new aes_info; uint8_t* keys = new uint8_t[key_bytes]; uint8_t* key_expand = new uint8_t[expand_size]; memset(key_expand, 0, expand_size*sizeof(uint8_t)); // create a generic key. Could be passed in through CLI but since this program is for benchmarking // juist create everything as needed for (int i = 0; i < key_bytes; i++) keys[i] = i; // iv->ctr = 0; iv->uiv = 0xDECAFBAD; // classic iv->nonce = 0xB00B; // (; // finally read data inFile.read((char*)&buffer[0], length); // be a good boy inFile.close(); for (int i = length; i < padded_length; i++) { buffer[i] = 0; // pad with zeros } // seems sensical to just expand your key here KeyExpansion(key_expand, keys,type); // set all attributes and return new ptr aes->ctr_iv = iv; aes->file_length = length; aes->padded_length = padded_length; aes->expand_length = expand_size; aes->data = buffer; aes->rounds = rounds; aes->keys = keys; aes->key_expand = key_expand; aes->type = type; return aes; } // destroy it ruthlessly void aes::Common::destroy_aes_struct(aes_info* aes) { // delete all our internal data delete[] aes->data; delete[] aes->keys; delete[] aes->key_expand; delete aes->ctr_iv; // finally delete yourself... you lived a good life... delete aes; } // change to pass in the instance? void aes::Common::KeyExpansion(uint8_t* RoundKey, const uint8_t* Key,int type) { unsigned i,j,k; uint8_t tempa[4]; // Used for the column/row operations int key_words; // how many 32 bit words in a key int rounds; // how many rounds for generating encrpytion decryption if (type == _AES128_) { key_words = 4; rounds = 10; } else if (type == _AES192_) { key_words = 6; rounds = 12; } else if (type == _AES256_) { key_words = 8; rounds = 14; } else { printf("invalid aes \n"); exit(1); // terminate with error } // The first round key is the key itself. for (i = 0; i < key_words; ++i) { RoundKey[(i * 4) + 0] = Key[(i * 4) + 0]; RoundKey[(i * 4) + 1] = Key[(i * 4) + 1]; RoundKey[(i * 4) + 2] = Key[(i * 4) + 2]; RoundKey[(i * 4) + 3] = Key[(i * 4) + 3]; } // All other round keys are found from the previous round keys. for (i = key_words; i < 4 * (rounds + 1); ++i) { { k = (i - 1) * 4; tempa[0] = RoundKey[k + 0]; tempa[1] = RoundKey[k + 1]; tempa[2] = RoundKey[k + 2]; tempa[3] = RoundKey[k + 3]; } if ( i % key_words == 0) { // This function shifts the 4 bytes in a word to the left once. // [a0,a1,a2,a3] becomes [a1,a2,a3,a0] // Function RotWord() { const uint8_t u8tmp = tempa[0]; tempa[0] = tempa[1]; tempa[1] = tempa[2]; tempa[2] = tempa[3]; tempa[3] = u8tmp; } // SubWord() is a function that takes a four-byte input word and // applies the S-box to each of the four bytes to produce an output word. // Function Subword() { tempa[0] = sbox1[tempa[0]]; tempa[1] = sbox1[tempa[1]]; tempa[2] = sbox1[tempa[2]]; tempa[3] = sbox1[tempa[3]]; } tempa[0] = tempa[0] ^ Rcon[i / key_words]; } if (type == _AES256_) { if (i % key_words == 4) { // Function Subword() { tempa[0] = sbox1[tempa[0]]; tempa[1] = sbox1[tempa[1]]; tempa[2] = sbox1[tempa[2]]; tempa[3] = sbox1[tempa[3]]; } } } j = i * 4; k = (i - key_words) * 4; RoundKey[j + 0] = RoundKey[k + 0] ^ tempa[0]; RoundKey[j + 1] = RoundKey[k + 1] ^ tempa[1]; RoundKey[j + 2] = RoundKey[k + 2] ^ tempa[2]; RoundKey[j + 3] = RoundKey[k + 3] ^ tempa[3]; } } } }
278090c4f11c676aff731ad696751858aa54029d.cu
#include "aes_ecb_byte.h" #include "common.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <stdint.h> #include <iostream> #include <iomanip> #include <fstream> #include <string> void checkCUDAErrorFn(const char *msg, const char *file, int line) { cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } namespace aes { namespace Common { // The round constant word array, Rcon[i], contains the values given by // x to the power (i-1) being powers of x (x is denoted as {02}) in the field GF(2^8) static const uint8_t Rcon[11] = { 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 }; // S table static const uint8_t sbox1[256] = { 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 }; void aes::Common::write_to_file(uint8_t* data, uint32_t length) { std::ofstream outFile("rainier1.bin"); if ( outFile.is_open() ) { for (int i = 0; i < length; i++) { outFile << data[i]; } } outFile.close(); } // create our AES instance aes_info* aes::Common::create_aes_struct(std::string File,int type) { std::ifstream inFile; int key_bytes; int rounds; int expand_size; if (type == _AES128_) { key_bytes = 16; rounds = 10; expand_size = 176; } else if(type == _AES192_) { key_bytes = 24; rounds = 12; expand_size = 208; } else if(type == _AES256_) { key_bytes = 32; rounds = 14; expand_size = 240; } else { printf("invalid aes \n"); exit(1); // terminate with error } inFile.open(File); if (!inFile) { std::cout << "Unable to open file" << File << std::endl; exit(1); // terminate with error } // get file and padding length inFile.seekg(0, std::ios::end); uint32_t length = inFile.tellg(); // get length uint32_t padded_length = ((length % 16) == 0) ? 
length : length + (16 - (length % 16)); //std::cout << length << " : " << padded_length << std::endl; inFile.seekg(0, std::ios::beg); // go back // make buffers and such IV* iv = new IV; uint8_t* buffer = new uint8_t[padded_length]; aes_info* aes = new aes_info; uint8_t* keys = new uint8_t[key_bytes]; uint8_t* key_expand = new uint8_t[expand_size]; memset(key_expand, 0, expand_size*sizeof(uint8_t)); // create a generic key. Could be passed in through CLI but since this program is for benchmarking // juist create everything as needed for (int i = 0; i < key_bytes; i++) keys[i] = i; // iv->ctr = 0; iv->uiv = 0xDECAFBAD; // classic iv->nonce = 0xB00B; // (; // finally read data inFile.read((char*)&buffer[0], length); // be a good boy inFile.close(); for (int i = length; i < padded_length; i++) { buffer[i] = 0; // pad with zeros } // seems sensical to just expand your key here KeyExpansion(key_expand, keys,type); // set all attributes and return new ptr aes->ctr_iv = iv; aes->file_length = length; aes->padded_length = padded_length; aes->expand_length = expand_size; aes->data = buffer; aes->rounds = rounds; aes->keys = keys; aes->key_expand = key_expand; aes->type = type; return aes; } // destroy it ruthlessly void aes::Common::destroy_aes_struct(aes_info* aes) { // delete all our internal data delete[] aes->data; delete[] aes->keys; delete[] aes->key_expand; delete aes->ctr_iv; // finally delete yourself... you lived a good life... delete aes; } // change to pass in the instance? void aes::Common::KeyExpansion(uint8_t* RoundKey, const uint8_t* Key,int type) { unsigned i,j,k; uint8_t tempa[4]; // Used for the column/row operations int key_words; // how many 32 bit words in a key int rounds; // how many rounds for generating encrpytion decryption if (type == _AES128_) { key_words = 4; rounds = 10; } else if (type == _AES192_) { key_words = 6; rounds = 12; } else if (type == _AES256_) { key_words = 8; rounds = 14; } else { printf("invalid aes \n"); exit(1); // terminate with error } // The first round key is the key itself. for (i = 0; i < key_words; ++i) { RoundKey[(i * 4) + 0] = Key[(i * 4) + 0]; RoundKey[(i * 4) + 1] = Key[(i * 4) + 1]; RoundKey[(i * 4) + 2] = Key[(i * 4) + 2]; RoundKey[(i * 4) + 3] = Key[(i * 4) + 3]; } // All other round keys are found from the previous round keys. for (i = key_words; i < 4 * (rounds + 1); ++i) { { k = (i - 1) * 4; tempa[0] = RoundKey[k + 0]; tempa[1] = RoundKey[k + 1]; tempa[2] = RoundKey[k + 2]; tempa[3] = RoundKey[k + 3]; } if ( i % key_words == 0) { // This function shifts the 4 bytes in a word to the left once. // [a0,a1,a2,a3] becomes [a1,a2,a3,a0] // Function RotWord() { const uint8_t u8tmp = tempa[0]; tempa[0] = tempa[1]; tempa[1] = tempa[2]; tempa[2] = tempa[3]; tempa[3] = u8tmp; } // SubWord() is a function that takes a four-byte input word and // applies the S-box to each of the four bytes to produce an output word. // Function Subword() { tempa[0] = sbox1[tempa[0]]; tempa[1] = sbox1[tempa[1]]; tempa[2] = sbox1[tempa[2]]; tempa[3] = sbox1[tempa[3]]; } tempa[0] = tempa[0] ^ Rcon[i / key_words]; } if (type == _AES256_) { if (i % key_words == 4) { // Function Subword() { tempa[0] = sbox1[tempa[0]]; tempa[1] = sbox1[tempa[1]]; tempa[2] = sbox1[tempa[2]]; tempa[3] = sbox1[tempa[3]]; } } } j = i * 4; k = (i - key_words) * 4; RoundKey[j + 0] = RoundKey[k + 0] ^ tempa[0]; RoundKey[j + 1] = RoundKey[k + 1] ^ tempa[1]; RoundKey[j + 2] = RoundKey[k + 2] ^ tempa[2]; RoundKey[j + 3] = RoundKey[k + 3] ^ tempa[3]; } } } }
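// Note on the AES pair above: it shows hipify's one-for-one renaming of the error API,
// cudaError_t -> hipError_t, cudaGetLastError -> hipGetLastError, cudaSuccess -> hipSuccess,
// cudaGetErrorString -> hipGetErrorString. A small self-contained sketch of the same checking
// pattern written directly against the HIP names (illustrative only; the checkHipErrorFn and
// CHECK_HIP names are made up here, not taken from the dataset):
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

static void checkHipErrorFn(const char* msg, const char* file, int line)
{
    hipError_t err = hipGetLastError();
    if (err == hipSuccess) return;
    fprintf(stderr, "HIP error (%s:%d): %s: %s\n", file, line, msg, hipGetErrorString(err));
    exit(EXIT_FAILURE);
}

#define CHECK_HIP(msg) checkHipErrorFn((msg), __FILE__, __LINE__)

int main()
{
    float* d_buf = nullptr;
    hipMalloc((void**)&d_buf, 16 * sizeof(float));
    CHECK_HIP("hipMalloc");
    hipFree(d_buf);
    CHECK_HIP("hipFree");
    return 0;
}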
413201bdad12a46c09c1fbaf0eed295b59b81448.hip
// !!! This is a file automatically generated by hipify!!! /* thrust library does not allow multiple files */ #include "THHBlas.hip" #include "THHStorage.hip" #include "THHStorageCopy.hip" #include "THHTensor.hip" #include "THHTensorCopy.hip" #include "THHTensorMath.hip" #include "THHTensorConv.hip" #include "THHTensorRandom.hip"
413201bdad12a46c09c1fbaf0eed295b59b81448.cu
/* thrust library does not allow multiple files */ #include "THCBlas.cu" #include "THCStorage.cu" #include "THCStorageCopy.cu" #include "THCTensor.cu" #include "THCTensorCopy.cu" #include "THCTensorMath.cu" #include "THCTensorConv.cu" #include "THCTensorRandom.cu"
d21dd12459cd51004221bee74d941f5df6c8b83f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "saber/funcs/impl/cuda/saber_sequence_concat.h" #include "saber/core/tensor_op.h" #define BUILD_DEV __device__ namespace anakin{ namespace saber{ template<typename Dtype> __global__ void ker_sequence_concat_fwd(Dtype * out_data, const uint64_t* in_locate_data, const int* o2i_map, const int* o2i_w_map, const int seq_num, const int emb_size, const int count) { CUDA_KERNEL_LOOP(tid, count) { int emb_id = tid % emb_size; int word_id = tid / emb_size; int input_id = o2i_map[word_id]; int cur_word_id = o2i_w_map[word_id]; const Dtype* in_data = (const Dtype*)(in_locate_data[input_id]); out_data[tid] = in_data[cur_word_id * emb_size + emb_id]; } } template <> SaberStatus SaberSequenceConcat<NV, AK_FLOAT>::create( \ const std::vector<Tensor<NV>*>& inputs, std::vector<Tensor<NV>*>& outputs, SequenceConcatParam<NV>& param, Context<NV>& ctx) { this->_ctx = &ctx; return SaberSuccess; } template <> SaberStatus SaberSequenceConcat<NV, AK_FLOAT>::init( \ const std::vector<Tensor<NV>*>& inputs, std::vector<Tensor<NV>*>& outputs, SequenceConcatParam<NV>& param, Context<NV>& ctx) { int out_num = 0; for (int i = 0; i < inputs.size(); i++) { out_num += inputs[i]->num(); } Shape shape({out_num, 1, 1, 1}, Layout_NCHW); _out2in_map_tensor.re_alloc(shape, AK_INT32); _out2in_word_map_tensor.re_alloc(shape, AK_INT32); int in_num = inputs.size(); Shape in_locate_shape({in_num, 1, 1, 1}, Layout_NCHW); _in_locate_tensor.re_alloc(in_locate_shape, AK_UINT64); this->_ctx = &ctx; return create(inputs, outputs, param, ctx); } template <> SaberStatus SaberSequenceConcat<NV, AK_FLOAT>::dispatch( \ const std::vector<Tensor<NV>*>& inputs, std::vector<Tensor<NV>*>& outputs, SequenceConcatParam<NV>& param) { /* hipStream_t cuda_stream = this->_ctx->get_compute_stream(); int seq_num = inputs[0]->get_seq_offset()[0].size() - 1; const int emb_size = inputs[0]->valid_size() / inputs[0]->num(); float *output_data = (float*)outputs[0]->mutable_data(); for (int i = 0; i < seq_num; i++) { for (int j = 0; j < inputs.size(); j++) { size_t cur_len = inputs[j]->get_seq_offset()[0][i+1] - inputs[j]->get_seq_offset()[0][i]; const OpDataType *input_data = (const OpDataType*)inputs[j]->data() + inputs[j]->get_seq_offset()[0][i] * emb_size; hipMemcpyAsync(output_data, input_data, sizeof(OpDataType) * cur_len * emb_size, hipMemcpyDeviceToDevice, cuda_stream); output_data += cur_len * emb_size; } } */ int seq_num = inputs[0]->get_seq_offset()[0].size() - 1; const int emb_size = inputs[0]->valid_size() / inputs[0]->num(); for (int i = 1; i < inputs.size(); i++) { int cur_emb_size = inputs[i]->valid_size() / inputs[i]->num(); int cur_seq_num = inputs[i]->get_seq_offset()[0].size() - 1; CHECK_EQ(emb_size, cur_emb_size) << "sequence concat emb size must be the same"; CHECK_EQ(seq_num, cur_seq_num) << "sequence concat seq num must be the same"; } float *out_data = (float*)outputs[0]->mutable_data(); std::vector<uint64_t> in_locate_vec; for (int i = 0; i < inputs.size(); i++) { //in_locate_vec.push_back(static_cast<uint64_t>(inputs[i]->data())); in_locate_vec.push_back((uint64_t)(inputs[i]->data())); } std::vector<int> out2in_map; std::vector<int> out2in_word_map; for (int i = 0; i < seq_num; i++) { for (int j = 0; j < inputs.size(); j++) { auto offset = inputs[j]->get_seq_offset()[0]; int cur_len = offset[i+1] - offset[i]; for (int k = 0; k < cur_len; k++) { out2in_map.push_back(j); out2in_word_map.push_back(offset[i] + k); } } } int word_num = 
out2in_map.size(); Shape o2i_map_shape({word_num, 1, 1, 1}, Layout_NCHW); _out2in_map_tensor.reshape(o2i_map_shape); _out2in_word_map_tensor.reshape(o2i_map_shape); hipStream_t cuda_stream = this->_ctx->get_compute_stream(); int* gpu_o2i_map_data = (int *)_out2in_map_tensor.mutable_data(); int* gpu_o2i_w_map_data = (int *)_out2in_word_map_tensor.mutable_data(); uint64_t* gpu_in_locate_data = (uint64_t*)_in_locate_tensor.mutable_data(); hipMemcpyAsync(gpu_o2i_map_data, &out2in_map[0], sizeof(int) * out2in_map.size(), hipMemcpyHostToDevice, cuda_stream); hipMemcpyAsync(gpu_o2i_w_map_data, &out2in_word_map[0], sizeof(int) * out2in_word_map.size(), hipMemcpyHostToDevice, cuda_stream); hipMemcpyAsync(gpu_in_locate_data, &in_locate_vec[0], sizeof(uint64_t) * in_locate_vec.size(), hipMemcpyHostToDevice, cuda_stream); int count = inputs[0]->valid_size(); for (int i = 1; i < inputs.size(); i++) { count += inputs[i]->valid_size(); } hipLaunchKernelGGL(( ker_sequence_concat_fwd<float>) , dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, out_data, gpu_in_locate_data, gpu_o2i_map_data, gpu_o2i_w_map_data, seq_num, emb_size, count); CUDA_POST_KERNEL_CHECK; return SaberSuccess; } template class SaberSequenceConcat<NV, AK_FLOAT>; DEFINE_OP_TEMPLATE(SaberSequenceConcat, SequenceConcatParam, NV, AK_HALF); DEFINE_OP_TEMPLATE(SaberSequenceConcat, SequenceConcatParam, NV, AK_INT8); } }
d21dd12459cd51004221bee74d941f5df6c8b83f.cu
#include "saber/funcs/impl/cuda/saber_sequence_concat.h" #include "saber/core/tensor_op.h" #define BUILD_DEV __device__ namespace anakin{ namespace saber{ template<typename Dtype> __global__ void ker_sequence_concat_fwd(Dtype * out_data, const uint64_t* in_locate_data, const int* o2i_map, const int* o2i_w_map, const int seq_num, const int emb_size, const int count) { CUDA_KERNEL_LOOP(tid, count) { int emb_id = tid % emb_size; int word_id = tid / emb_size; int input_id = o2i_map[word_id]; int cur_word_id = o2i_w_map[word_id]; const Dtype* in_data = (const Dtype*)(in_locate_data[input_id]); out_data[tid] = in_data[cur_word_id * emb_size + emb_id]; } } template <> SaberStatus SaberSequenceConcat<NV, AK_FLOAT>::create( \ const std::vector<Tensor<NV>*>& inputs, std::vector<Tensor<NV>*>& outputs, SequenceConcatParam<NV>& param, Context<NV>& ctx) { this->_ctx = &ctx; return SaberSuccess; } template <> SaberStatus SaberSequenceConcat<NV, AK_FLOAT>::init( \ const std::vector<Tensor<NV>*>& inputs, std::vector<Tensor<NV>*>& outputs, SequenceConcatParam<NV>& param, Context<NV>& ctx) { int out_num = 0; for (int i = 0; i < inputs.size(); i++) { out_num += inputs[i]->num(); } Shape shape({out_num, 1, 1, 1}, Layout_NCHW); _out2in_map_tensor.re_alloc(shape, AK_INT32); _out2in_word_map_tensor.re_alloc(shape, AK_INT32); int in_num = inputs.size(); Shape in_locate_shape({in_num, 1, 1, 1}, Layout_NCHW); _in_locate_tensor.re_alloc(in_locate_shape, AK_UINT64); this->_ctx = &ctx; return create(inputs, outputs, param, ctx); } template <> SaberStatus SaberSequenceConcat<NV, AK_FLOAT>::dispatch( \ const std::vector<Tensor<NV>*>& inputs, std::vector<Tensor<NV>*>& outputs, SequenceConcatParam<NV>& param) { /* cudaStream_t cuda_stream = this->_ctx->get_compute_stream(); int seq_num = inputs[0]->get_seq_offset()[0].size() - 1; const int emb_size = inputs[0]->valid_size() / inputs[0]->num(); float *output_data = (float*)outputs[0]->mutable_data(); for (int i = 0; i < seq_num; i++) { for (int j = 0; j < inputs.size(); j++) { size_t cur_len = inputs[j]->get_seq_offset()[0][i+1] - inputs[j]->get_seq_offset()[0][i]; const OpDataType *input_data = (const OpDataType*)inputs[j]->data() + inputs[j]->get_seq_offset()[0][i] * emb_size; cudaMemcpyAsync(output_data, input_data, sizeof(OpDataType) * cur_len * emb_size, cudaMemcpyDeviceToDevice, cuda_stream); output_data += cur_len * emb_size; } } */ int seq_num = inputs[0]->get_seq_offset()[0].size() - 1; const int emb_size = inputs[0]->valid_size() / inputs[0]->num(); for (int i = 1; i < inputs.size(); i++) { int cur_emb_size = inputs[i]->valid_size() / inputs[i]->num(); int cur_seq_num = inputs[i]->get_seq_offset()[0].size() - 1; CHECK_EQ(emb_size, cur_emb_size) << "sequence concat emb size must be the same"; CHECK_EQ(seq_num, cur_seq_num) << "sequence concat seq num must be the same"; } float *out_data = (float*)outputs[0]->mutable_data(); std::vector<uint64_t> in_locate_vec; for (int i = 0; i < inputs.size(); i++) { //in_locate_vec.push_back(static_cast<uint64_t>(inputs[i]->data())); in_locate_vec.push_back((uint64_t)(inputs[i]->data())); } std::vector<int> out2in_map; std::vector<int> out2in_word_map; for (int i = 0; i < seq_num; i++) { for (int j = 0; j < inputs.size(); j++) { auto offset = inputs[j]->get_seq_offset()[0]; int cur_len = offset[i+1] - offset[i]; for (int k = 0; k < cur_len; k++) { out2in_map.push_back(j); out2in_word_map.push_back(offset[i] + k); } } } int word_num = out2in_map.size(); Shape o2i_map_shape({word_num, 1, 1, 1}, Layout_NCHW); 
_out2in_map_tensor.reshape(o2i_map_shape); _out2in_word_map_tensor.reshape(o2i_map_shape); cudaStream_t cuda_stream = this->_ctx->get_compute_stream(); int* gpu_o2i_map_data = (int *)_out2in_map_tensor.mutable_data(); int* gpu_o2i_w_map_data = (int *)_out2in_word_map_tensor.mutable_data(); uint64_t* gpu_in_locate_data = (uint64_t*)_in_locate_tensor.mutable_data(); cudaMemcpyAsync(gpu_o2i_map_data, &out2in_map[0], sizeof(int) * out2in_map.size(), cudaMemcpyHostToDevice, cuda_stream); cudaMemcpyAsync(gpu_o2i_w_map_data, &out2in_word_map[0], sizeof(int) * out2in_word_map.size(), cudaMemcpyHostToDevice, cuda_stream); cudaMemcpyAsync(gpu_in_locate_data, &in_locate_vec[0], sizeof(uint64_t) * in_locate_vec.size(), cudaMemcpyHostToDevice, cuda_stream); int count = inputs[0]->valid_size(); for (int i = 1; i < inputs.size(); i++) { count += inputs[i]->valid_size(); } ker_sequence_concat_fwd<float> <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>( out_data, gpu_in_locate_data, gpu_o2i_map_data, gpu_o2i_w_map_data, seq_num, emb_size, count); CUDA_POST_KERNEL_CHECK; return SaberSuccess; } template class SaberSequenceConcat<NV, AK_FLOAT>; DEFINE_OP_TEMPLATE(SaberSequenceConcat, SequenceConcatParam, NV, AK_HALF); DEFINE_OP_TEMPLATE(SaberSequenceConcat, SequenceConcatParam, NV, AK_INT8); } }
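// Note on the SaberSequenceConcat pair above: it is the clearest example in this set of how
// hipify rewrites a triple-chevron launch, ker<<<grid, block, smem, stream>>>(args...) ->
// hipLaunchKernelGGL(ker, dim3(grid), dim3(block), smem, stream, args...), with cudaStream_t
// and cudaMemcpyAsync renamed to their hip equivalents. A minimal sketch of the same pattern
// (hypothetical kernel and sizes, not from the dataset):
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void fill(int* out, int value, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value;
}

int main()
{
    const int n = 256;
    int* d_out = nullptr;
    hipMalloc((void**)&d_out, n * sizeof(int));

    hipStream_t stream;
    hipStreamCreate(&stream);

    // CUDA form:               fill<<<1, 256, 0, stream>>>(d_out, 7, n);
    // HIP form after hipify:
    hipLaunchKernelGGL(fill, dim3(1), dim3(256), 0, stream, d_out, 7, n);

    int h_out[n];
    hipMemcpyAsync(h_out, d_out, n * sizeof(int), hipMemcpyDeviceToHost, stream);
    hipStreamSynchronize(stream);
    printf("h_out[0]=%d h_out[%d]=%d\n", h_out[0], n - 1, h_out[n - 1]);

    hipStreamDestroy(stream);
    hipFree(d_out);
    return 0;
}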
a642132cce32497a7e3c354c9d081e45c3b14f92.hip
// !!! This is a file automatically generated by hipify!!! #include "matrix_hip.cuh" void Matrix::to_gpu(void) { if (!gpu_enabled) { gpu_enabled = true; float* d_matrix; if (hipMalloc((void**)&d_matrix, sizeof(float)*dim1*dim2) != hipSuccess) throw "memory allocation failed\n"; hipMemcpy(d_matrix, matrix, sizeof(float)*dim1*dim2, hipMemcpyHostToDevice); delete[] matrix; matrix = d_matrix; } } void Matrix::to_cpu(void) { if (gpu_enabled) { gpu_enabled = false; float *h_matrix = new float[dim1*dim2]; hipMemcpy(h_matrix, matrix, sizeof(float)*dim1*dim2, hipMemcpyDeviceToHost); hipFree(matrix); matrix = h_matrix; } } Matrix Matrix::get_gpu(void) const { if (gpu_enabled) throw "cannot get another gpu matrix if it is already gpu"; return Matrix(matrix, dim1, dim2); } Matrix Matrix::get_cpu(void) const { if (gpu_enabled) { float* h_matrix = new float[dim1*dim2]; hipMemcpy(h_matrix, matrix, sizeof(float)*dim1*dim2, hipMemcpyDeviceToHost); return Matrix(h_matrix, dim1, dim2, false); } return Matrix(matrix, dim1, dim2, false); }
a642132cce32497a7e3c354c9d081e45c3b14f92.cu
#include "matrix.cuh" void Matrix::to_gpu(void) { if (!gpu_enabled) { gpu_enabled = true; float* d_matrix; if (cudaMalloc((void**)&d_matrix, sizeof(float)*dim1*dim2) != cudaSuccess) throw "memory allocation failed\n"; cudaMemcpy(d_matrix, matrix, sizeof(float)*dim1*dim2, cudaMemcpyHostToDevice); delete[] matrix; matrix = d_matrix; } } void Matrix::to_cpu(void) { if (gpu_enabled) { gpu_enabled = false; float *h_matrix = new float[dim1*dim2]; cudaMemcpy(h_matrix, matrix, sizeof(float)*dim1*dim2, cudaMemcpyDeviceToHost); cudaFree(matrix); matrix = h_matrix; } } Matrix Matrix::get_gpu(void) const { if (gpu_enabled) throw "cannot get another gpu matrix if it is already gpu"; return Matrix(matrix, dim1, dim2); } Matrix Matrix::get_cpu(void) const { if (gpu_enabled) { float* h_matrix = new float[dim1*dim2]; cudaMemcpy(h_matrix, matrix, sizeof(float)*dim1*dim2, cudaMemcpyDeviceToHost); return Matrix(h_matrix, dim1, dim2, false); } return Matrix(matrix, dim1, dim2, false); }
4be19d551aa0c5b3f584cebeb18bcd14610f513c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void jacobi(double * uold, double * unew, double * f, int width, int height, double lambda2){ int blockId = blockIdx.x + blockIdx.y * gridDim.x; int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; if(index < width*height){ int M = width+2; int i = index + M + 1 + 2 * (index / width); unew[i] = 0.25 * (uold[i-1] + uold[i+1] + uold[i-M] + uold[i+M] + lambda2*f[i]); } }
4be19d551aa0c5b3f584cebeb18bcd14610f513c.cu
#include <stdio.h> __global__ void jacobi(double * uold, double * unew, double * f, int width, int height, double lambda2){ int blockId = blockIdx.x + blockIdx.y * gridDim.x; int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; if(index < width*height){ int M = width+2; int i = index + M + 1 + 2 * (index / width); unew[i] = 0.25 * (uold[i-1] + uold[i+1] + uold[i-M] + uold[i+M] + lambda2*f[i]); } }
aef137004de9bf230f22e60911eae7e10be3df0c.hip
// !!! This is a file automatically generated by hipify!!! /* * (c) 2009-2010 Christoph Schied <[email protected]> * * This file is part of flame. * * flame is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * flame is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with flame. If not, see <http://www.gnu.org/licenses/>. */ #include <sys/time.h> #include <string> #include <iostream> #include <fstream> #include <cmath> #include <cstring> #include <algorithm> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include "flame.hpp" #define VARPARM(a,b) const_mem_params.variation_parameters[a][b] #define FUNC_COLOR(a) const_mem_params.function_colors[a] #define FUNC_WEIGHT(a) function_weights[a] #define AFFINE_PRE(a, b) const_mem_params.pre_transform_params[a][b] #define AFFINE_POST(a, b) const_mem_params.post_transform_params[a][b] #include "kernels.hip" int main(int argc, char **argv) { if (argc != 2) { printf("Usage: %s <repeat>\n", argv[0]); return 1; } const int repeat = atoi(argv[1]); printf("reset parameters..\n"); ConstMemParams const_mem_params; float function_weights[NUM_FUNCTIONS]; memset(&const_mem_params, 0, sizeof const_mem_params); for(int i = 0; i < NUM_FUNCTIONS; i++) { function_weights[i] = 0.0f; const_mem_params.variation_parameters[i][0].factor = 1.0f; const_mem_params.pre_transform_params[i][0] = 1.0f; const_mem_params.pre_transform_params[i][4] = 1.0f; const_mem_params.post_transform_params[i][0] = 1.0f; const_mem_params.post_transform_params[i][4] = 1.0f; } function_weights[0] = 1.0f; AFFINE_PRE(0, 0) = 1.4f; AFFINE_PRE(0, 1) = 0.6f; AFFINE_PRE(1, 2) = 0.4f; AFFINE_PRE(2, 0) = 0.3f; AFFINE_PRE(2, 2) = 1.0f; FUNC_COLOR(0) = 1.0f; FUNC_COLOR(2) = 0.4f; FUNC_WEIGHT(1) = 0.5f; FUNC_WEIGHT(2) = 0.6f; const_mem_params.enable_sierpinski = 1; VARPARM(2, 0).idx = 9; VARPARM(0, 0).idx = 13; VARPARM(1, 0).idx = 5; VARPARM(1, 1).idx = 7; VARPARM(2, 0).factor = 1.0f; VARPARM(0, 0).factor = 1.0f; VARPARM(1, 0).factor = 1.0f; VARPARM(1, 1).factor = -0.3f; AFFINE_PRE(2, 2) = -1.0f; AFFINE_PRE(2, 5) = -1.0f; srand(2); unsigned mersenne_state[624]; for(int i = 0; i < 624; i++) mersenne_state[i] = rand(); for(int i = 0; i < 10000; i++) mersenne_twister(mersenne_state); printf("generating random numbers\n"); float *rn_tmp = new float[NUM_RANDOMS]; for(int i = 0; i < NUM_RANDOMS; i++) rn_tmp[i] = mersenne_twister(mersenne_state) * (1.0f / 4294967296.0f); float *random_numbers; hipMalloc((void **) &random_numbers, NUM_RANDOMS * sizeof(float)); hipMemcpy(random_numbers, rn_tmp, NUM_RANDOMS * sizeof(float), hipMemcpyHostToDevice); delete[] rn_tmp; printf("generating permutations\n"); PermSortElement *to_sort = new PermSortElement[NUM_THREADS]; unsigned short *perm_data = new unsigned short[NUM_THREADS * NUM_PERMUTATIONS]; for(int i = 0; i < NUM_PERMUTATIONS; i++) { //if(!(i & 127)) printf("."); for(int j = 0; j < NUM_THREADS; j++) { to_sort[j].value = mersenne_twister(mersenne_state); to_sort[j].idx = j; } std::sort(to_sort, to_sort + NUM_THREADS); for(int j = 0; j < NUM_THREADS; j++) { perm_data[i * NUM_THREADS + j] = to_sort[j].idx; } } unsigned short 
*permutations; hipMalloc((void **) &permutations, NUM_THREADS * NUM_PERMUTATIONS * sizeof(unsigned short)); hipMemcpy(permutations, perm_data, NUM_THREADS * NUM_PERMUTATIONS * sizeof(unsigned short), hipMemcpyHostToDevice); delete[] perm_data; short2 *short_points; hipMalloc((void **) &short_points, NUM_THREADS * sizeof(short2)); short *colors; hipMalloc((void **) &colors, NUM_THREADS * sizeof(short)); float2 *points_tmp = new float2[NUM_THREADS]; for(int i = 0; i < NUM_THREADS; i++) { points_tmp[i].x = (float(i) / NUM_THREADS - 0.5f) * 2.0f; points_tmp[i].y = (radical_inverse(i, 2) - 0.5f) * 2.0f; } float2 *start_points; hipMalloc((void **) &start_points, NUM_THREADS * sizeof(float2)); hipMemcpy(start_points, points_tmp, NUM_THREADS * sizeof(float2), hipMemcpyHostToDevice); delete[] points_tmp; float3 *vertices; hipMalloc((void**)&vertices, NUM_POINTS_PER_THREAD * NUM_THREADS * sizeof(float3)); printf("entering mainloop\n"); struct timeval tv, tv2; gettimeofday(&tv, NULL); int perm_pos = 0; for (int n = 0; n < repeat; n++) { float sum = 0.0f; for(int i = 0; i < NUM_FUNCTIONS; i++) sum += function_weights[i]; int num_threads_sum = 0; for(int i = 0; i < NUM_FUNCTIONS; i++) { int num_threads = (function_weights[i] / sum) * (NUM_THREADS); const_mem_params.thread_function_mapping[i] = num_threads; num_threads_sum += num_threads; } const_mem_params.thread_function_mapping[0] += NUM_THREADS - num_threads_sum; for(int i = 1; i < NUM_FUNCTIONS; i++) { const_mem_params.thread_function_mapping[i] += const_mem_params.thread_function_mapping[i - 1]; } dim3 grid(NUM_THREADS / THREADS_PER_BLOCK, 1, 1); dim3 block(THREADS_PER_BLOCK, 1, 1); hipLaunchKernelGGL(kernel_initialize, dim3(grid), dim3(block ), 0, 0, short_points, colors, permutations, perm_pos++, start_points, random_numbers, const_mem_params); hipDeviceSynchronize(); check_cuda_error("kernel_initialize"); for(int i = 0; i < NUM_ITERATIONS; i++) { hipLaunchKernelGGL(kernel_iterate, dim3(grid), dim3(block ), 0, 0, short_points, colors, permutations, perm_pos++, random_numbers, const_mem_params); perm_pos %= NUM_PERMUTATIONS; hipDeviceSynchronize(); check_cuda_error("kernel_iterate"); } hipLaunchKernelGGL(kernel_generate_points, dim3(grid), dim3(block ), 0, 0, vertices, short_points, colors, permutations, perm_pos++, random_numbers, const_mem_params); hipDeviceSynchronize(); check_cuda_error("kernel_generate_points"); perm_pos += NUM_POINTS_PER_THREAD - 1; perm_pos %= NUM_PERMUTATIONS; } gettimeofday(&tv2, NULL); float frametime = (tv2.tv_sec - tv.tv_sec) * 1000000 + tv2.tv_usec - tv.tv_usec; printf("Total frame time is %.1f us\n", frametime); #ifdef DUMP // dump vertices float3 *pixels = new float3[NUM_POINTS_PER_THREAD * NUM_THREADS]; hipMemcpy(pixels, vertices, NUM_POINTS_PER_THREAD * NUM_THREADS * sizeof(float3), hipMemcpyDeviceToHost); for (int i = 0; i < NUM_POINTS_PER_THREAD * NUM_THREADS; i++) printf("%d x=%.1f y=%.1f color=%.1f\n", i, pixels[i].x, pixels[i].y, pixels[i].z); delete[] pixels; #endif hipFree(start_points); hipFree(short_points); hipFree(colors); hipFree(random_numbers); hipFree(permutations); hipFree(vertices); return 0; }
aef137004de9bf230f22e60911eae7e10be3df0c.cu
/* * (c) 2009-2010 Christoph Schied <[email protected]> * * This file is part of flame. * * flame is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * flame is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with flame. If not, see <http://www.gnu.org/licenses/>. */ #include <sys/time.h> #include <string> #include <iostream> #include <fstream> #include <cmath> #include <cstring> #include <algorithm> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include "flame.hpp" #define VARPARM(a,b) const_mem_params.variation_parameters[a][b] #define FUNC_COLOR(a) const_mem_params.function_colors[a] #define FUNC_WEIGHT(a) function_weights[a] #define AFFINE_PRE(a, b) const_mem_params.pre_transform_params[a][b] #define AFFINE_POST(a, b) const_mem_params.post_transform_params[a][b] #include "kernels.cu" int main(int argc, char **argv) { if (argc != 2) { printf("Usage: %s <repeat>\n", argv[0]); return 1; } const int repeat = atoi(argv[1]); printf("reset parameters..\n"); ConstMemParams const_mem_params; float function_weights[NUM_FUNCTIONS]; memset(&const_mem_params, 0, sizeof const_mem_params); for(int i = 0; i < NUM_FUNCTIONS; i++) { function_weights[i] = 0.0f; const_mem_params.variation_parameters[i][0].factor = 1.0f; const_mem_params.pre_transform_params[i][0] = 1.0f; const_mem_params.pre_transform_params[i][4] = 1.0f; const_mem_params.post_transform_params[i][0] = 1.0f; const_mem_params.post_transform_params[i][4] = 1.0f; } function_weights[0] = 1.0f; AFFINE_PRE(0, 0) = 1.4f; AFFINE_PRE(0, 1) = 0.6f; AFFINE_PRE(1, 2) = 0.4f; AFFINE_PRE(2, 0) = 0.3f; AFFINE_PRE(2, 2) = 1.0f; FUNC_COLOR(0) = 1.0f; FUNC_COLOR(2) = 0.4f; FUNC_WEIGHT(1) = 0.5f; FUNC_WEIGHT(2) = 0.6f; const_mem_params.enable_sierpinski = 1; VARPARM(2, 0).idx = 9; VARPARM(0, 0).idx = 13; VARPARM(1, 0).idx = 5; VARPARM(1, 1).idx = 7; VARPARM(2, 0).factor = 1.0f; VARPARM(0, 0).factor = 1.0f; VARPARM(1, 0).factor = 1.0f; VARPARM(1, 1).factor = -0.3f; AFFINE_PRE(2, 2) = -1.0f; AFFINE_PRE(2, 5) = -1.0f; srand(2); unsigned mersenne_state[624]; for(int i = 0; i < 624; i++) mersenne_state[i] = rand(); for(int i = 0; i < 10000; i++) mersenne_twister(mersenne_state); printf("generating random numbers\n"); float *rn_tmp = new float[NUM_RANDOMS]; for(int i = 0; i < NUM_RANDOMS; i++) rn_tmp[i] = mersenne_twister(mersenne_state) * (1.0f / 4294967296.0f); float *random_numbers; hipMalloc((void **) &random_numbers, NUM_RANDOMS * sizeof(float)); hipMemcpy(random_numbers, rn_tmp, NUM_RANDOMS * sizeof(float), hipMemcpyHostToDevice); delete[] rn_tmp; printf("generating permutations\n"); PermSortElement *to_sort = new PermSortElement[NUM_THREADS]; unsigned short *perm_data = new unsigned short[NUM_THREADS * NUM_PERMUTATIONS]; for(int i = 0; i < NUM_PERMUTATIONS; i++) { //if(!(i & 127)) printf("."); for(int j = 0; j < NUM_THREADS; j++) { to_sort[j].value = mersenne_twister(mersenne_state); to_sort[j].idx = j; } std::sort(to_sort, to_sort + NUM_THREADS); for(int j = 0; j < NUM_THREADS; j++) { perm_data[i * NUM_THREADS + j] = to_sort[j].idx; } } unsigned short *permutations; hipMalloc((void **) &permutations, NUM_THREADS * 
NUM_PERMUTATIONS * sizeof(unsigned short)); hipMemcpy(permutations, perm_data, NUM_THREADS * NUM_PERMUTATIONS * sizeof(unsigned short), hipMemcpyHostToDevice); delete[] perm_data; short2 *short_points; hipMalloc((void **) &short_points, NUM_THREADS * sizeof(short2)); short *colors; hipMalloc((void **) &colors, NUM_THREADS * sizeof(short)); float2 *points_tmp = new float2[NUM_THREADS]; for(int i = 0; i < NUM_THREADS; i++) { points_tmp[i].x = (float(i) / NUM_THREADS - 0.5f) * 2.0f; points_tmp[i].y = (radical_inverse(i, 2) - 0.5f) * 2.0f; } float2 *start_points; hipMalloc((void **) &start_points, NUM_THREADS * sizeof(float2)); hipMemcpy(start_points, points_tmp, NUM_THREADS * sizeof(float2), hipMemcpyHostToDevice); delete[] points_tmp; float3 *vertices; hipMalloc((void**)&vertices, NUM_POINTS_PER_THREAD * NUM_THREADS * sizeof(float3)); printf("entering mainloop\n"); struct timeval tv, tv2; gettimeofday(&tv, NULL); int perm_pos = 0; for (int n = 0; n < repeat; n++) { float sum = 0.0f; for(int i = 0; i < NUM_FUNCTIONS; i++) sum += function_weights[i]; int num_threads_sum = 0; for(int i = 0; i < NUM_FUNCTIONS; i++) { int num_threads = (function_weights[i] / sum) * (NUM_THREADS); const_mem_params.thread_function_mapping[i] = num_threads; num_threads_sum += num_threads; } const_mem_params.thread_function_mapping[0] += NUM_THREADS - num_threads_sum; for(int i = 1; i < NUM_FUNCTIONS; i++) { const_mem_params.thread_function_mapping[i] += const_mem_params.thread_function_mapping[i - 1]; } dim3 grid(NUM_THREADS / THREADS_PER_BLOCK, 1, 1); dim3 block(THREADS_PER_BLOCK, 1, 1); hipLaunchKernelGGL(kernel_initialize, dim3(grid), dim3(block ), 0, 0, short_points, colors, permutations, perm_pos++, start_points, random_numbers, const_mem_params); hipDeviceSynchronize(); check_cuda_error("kernel_initialize"); for(int i = 0; i < NUM_ITERATIONS; i++) { hipLaunchKernelGGL(kernel_iterate, dim3(grid), dim3(block ), 0, 0, short_points, colors, permutations, perm_pos++, random_numbers, const_mem_params); perm_pos %= NUM_PERMUTATIONS; hipDeviceSynchronize(); check_cuda_error("kernel_iterate"); } hipLaunchKernelGGL(kernel_generate_points, dim3(grid), dim3(block ), 0, 0, vertices, short_points, colors, permutations, perm_pos++, random_numbers, const_mem_params); hipDeviceSynchronize(); check_cuda_error("kernel_generate_points"); perm_pos += NUM_POINTS_PER_THREAD - 1; perm_pos %= NUM_PERMUTATIONS; } gettimeofday(&tv2, NULL); float frametime = (tv2.tv_sec - tv.tv_sec) * 1000000 + tv2.tv_usec - tv.tv_usec; printf("Total frame time is %.1f us\n", frametime); #ifdef DUMP // dump vertices float3 *pixels = new float3[NUM_POINTS_PER_THREAD * NUM_THREADS]; hipMemcpy(pixels, vertices, NUM_POINTS_PER_THREAD * NUM_THREADS * sizeof(float3), hipMemcpyDeviceToHost); for (int i = 0; i < NUM_POINTS_PER_THREAD * NUM_THREADS; i++) printf("%d x=%.1f y=%.1f color=%.1f\n", i, pixels[i].x, pixels[i].y, pixels[i].z); delete[] pixels; #endif hipFree(start_points); hipFree(short_points); hipFree(colors); hipFree(random_numbers); hipFree(permutations); hipFree(vertices); return 0; }
aae1ef07ead461d3e54b286b624d16775cc796ca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2021 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cuda_help.h" #include "equal_reduce.h" #include "proj.h" using namespace Legion; namespace legate { namespace numpy { template <typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_eq_reduce_1d(DeferredValue<bool> result, const AccessorRO<T, 1> in1, const AccessorRO<T, 1> in2, const Point<1> origin, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; const coord_t x = origin[0] + offset; int value = (offset >= max) ? 1 : (in1[x] == in2[x]) ? 1 : 0; reduce_bool(result, value); } template <typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_eq_reduce_2d(DeferredValue<bool> result, const AccessorRO<T, 2> in1, const AccessorRO<T, 2> in2, const Point<2> origin, const Point<1> pitch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + offset % pitch[0]; int value = (offset >= max) ? 1 : (in1[x][y] == in2[x][y]) ? 1 : 0; reduce_bool(result, value); } template <typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_eq_reduce_3d(DeferredValue<bool> result, const AccessorRO<T, 3> in1, const AccessorRO<T, 3> in2, const Point<3> origin, const Point<2> pitch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1]; const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1]; int value = (offset >= max) ? 1 : (in1[x][y][z] == in2[x][y][z]) ? 
1 : 0; reduce_bool(result, value); } template <typename T> /*static*/ DeferredValue<bool> EqualReducTask<T>::gpu_variant( const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { DeferredValue<bool> result(true /*initial value*/); LegateDeserializer derez(task->args, task->arglen); const int dim = derez.unpack_dimension(); switch (dim) { case 1: { const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez); if (rect.empty()) break; const AccessorRO<T, 1> in1 = derez.unpack_accessor_RO<T, 1>(regions[0], rect); const AccessorRO<T, 1> in2 = derez.unpack_accessor_RO<T, 1>(regions[1], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; hipLaunchKernelGGL(( legate_eq_reduce_1d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, result, in1, in2, rect.lo, volume); break; } case 2: { const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez); if (rect.empty()) break; const AccessorRO<T, 2> in1 = derez.unpack_accessor_RO<T, 2>(regions[0], rect); const AccessorRO<T, 2> in2 = derez.unpack_accessor_RO<T, 2>(regions[1], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t pitch = rect.hi[1] - rect.lo[1] + 1; hipLaunchKernelGGL(( legate_eq_reduce_2d<T>) , dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, result, in1, in2, rect.lo, Point<1>(pitch), volume); break; } case 3: { const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez); if (rect.empty()) break; const AccessorRO<T, 3> in1 = derez.unpack_accessor_RO<T, 3>(regions[0], rect); const AccessorRO<T, 3> in2 = derez.unpack_accessor_RO<T, 3>(regions[1], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t diffy = rect.hi[1] - rect.lo[1] + 1; const coord_t diffz = rect.hi[2] - rect.lo[2] + 1; const coord_t pitch[2] = {diffy * diffz, diffz}; hipLaunchKernelGGL(( legate_eq_reduce_3d<T>) , dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, result, in1, in2, rect.lo, Point<2>(pitch), volume); break; } default: assert(false); } return result; } INSTANTIATE_DEFERRED_VALUE_TASK_VARIANT(EqualReducTask, bool, gpu_variant) } // namespace numpy } // namespace legate
aae1ef07ead461d3e54b286b624d16775cc796ca.cu
/* Copyright 2021 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cuda_help.h" #include "equal_reduce.h" #include "proj.h" using namespace Legion; namespace legate { namespace numpy { template <typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_eq_reduce_1d(DeferredValue<bool> result, const AccessorRO<T, 1> in1, const AccessorRO<T, 1> in2, const Point<1> origin, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; const coord_t x = origin[0] + offset; int value = (offset >= max) ? 1 : (in1[x] == in2[x]) ? 1 : 0; reduce_bool(result, value); } template <typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_eq_reduce_2d(DeferredValue<bool> result, const AccessorRO<T, 2> in1, const AccessorRO<T, 2> in2, const Point<2> origin, const Point<1> pitch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + offset % pitch[0]; int value = (offset >= max) ? 1 : (in1[x][y] == in2[x][y]) ? 1 : 0; reduce_bool(result, value); } template <typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_eq_reduce_3d(DeferredValue<bool> result, const AccessorRO<T, 3> in1, const AccessorRO<T, 3> in2, const Point<3> origin, const Point<2> pitch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1]; const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1]; int value = (offset >= max) ? 1 : (in1[x][y][z] == in2[x][y][z]) ? 
1 : 0; reduce_bool(result, value); } template <typename T> /*static*/ DeferredValue<bool> EqualReducTask<T>::gpu_variant( const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { DeferredValue<bool> result(true /*initial value*/); LegateDeserializer derez(task->args, task->arglen); const int dim = derez.unpack_dimension(); switch (dim) { case 1: { const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez); if (rect.empty()) break; const AccessorRO<T, 1> in1 = derez.unpack_accessor_RO<T, 1>(regions[0], rect); const AccessorRO<T, 1> in2 = derez.unpack_accessor_RO<T, 1>(regions[1], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; legate_eq_reduce_1d<T><<<blocks, THREADS_PER_BLOCK>>>(result, in1, in2, rect.lo, volume); break; } case 2: { const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez); if (rect.empty()) break; const AccessorRO<T, 2> in1 = derez.unpack_accessor_RO<T, 2>(regions[0], rect); const AccessorRO<T, 2> in2 = derez.unpack_accessor_RO<T, 2>(regions[1], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t pitch = rect.hi[1] - rect.lo[1] + 1; legate_eq_reduce_2d<T> <<<blocks, THREADS_PER_BLOCK>>>(result, in1, in2, rect.lo, Point<1>(pitch), volume); break; } case 3: { const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez); if (rect.empty()) break; const AccessorRO<T, 3> in1 = derez.unpack_accessor_RO<T, 3>(regions[0], rect); const AccessorRO<T, 3> in2 = derez.unpack_accessor_RO<T, 3>(regions[1], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t diffy = rect.hi[1] - rect.lo[1] + 1; const coord_t diffz = rect.hi[2] - rect.lo[2] + 1; const coord_t pitch[2] = {diffy * diffz, diffz}; legate_eq_reduce_3d<T> <<<blocks, THREADS_PER_BLOCK>>>(result, in1, in2, rect.lo, Point<2>(pitch), volume); break; } default: assert(false); } return result; } INSTANTIATE_DEFERRED_VALUE_TASK_VARIANT(EqualReducTask, bool, gpu_variant) } // namespace numpy } // namespace legate
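// Note on the legate equal_reduce pair above: it shows the launch rewrite for templated
// kernels, where the CUDA form legate_eq_reduce_1d<T><<<blocks, THREADS_PER_BLOCK>>>(...)
// becomes hipLaunchKernelGGL(( legate_eq_reduce_1d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK),
// 0, 0, ...), with the template-id wrapped in parentheses so that any commas inside the
// template arguments do not split the macro argument. A minimal sketch of launching a
// templated kernel this way (hypothetical axpy kernel, not from the dataset):
#include <hip/hip_runtime.h>
#include <cstdio>

template <typename T>
__global__ void axpy(T a, const T* x, T* y, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}

int main()
{
    const int n = 128;
    float hx[n], hy[n];
    for (int i = 0; i < n; ++i) { hx[i] = 1.0f; hy[i] = 2.0f; }

    float *dx = nullptr, *dy = nullptr;
    hipMalloc((void**)&dx, n * sizeof(float));
    hipMalloc((void**)&dy, n * sizeof(float));
    hipMemcpy(dx, hx, n * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(dy, hy, n * sizeof(float), hipMemcpyHostToDevice);

    // CUDA form:  axpy<float><<<1, 128>>>(3.0f, dx, dy, n);
    hipLaunchKernelGGL((axpy<float>), dim3(1), dim3(128), 0, 0, 3.0f, dx, dy, n);

    hipMemcpy(hy, dy, n * sizeof(float), hipMemcpyDeviceToHost);
    printf("hy[0] = %f (expected 5.0)\n", hy[0]);

    hipFree(dx);
    hipFree(dy);
    return 0;
}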
288f19b959e9253ecbadfea15b7989ac61e5aa26.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorScatterGather.cu" #else #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_gatherKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(gather)(THCState* state, THCTensor *tensor, THCTensor *src, int dim, THCudaLongTensor *index) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, src), 4, "Index tensor must have same dimensions as input tensor"); THLongStorage *indexSize = THCudaLongTensor_newSizeOf(state, index); THArgCheck(THCTensor_(isSize)(state, tensor, indexSize), 4, "Index tensor must have the same size as output tensor."); THLongStorage_free(indexSize); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 3, "Index dimension is out of bounds"); THArgCheck(THCTensor_(nDimension)(state, src) == THCTensor_(nDimension)(state, tensor), 2, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCTensor_(size)(state, src, d), 2, "Input tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); THCudaCheck(hipGetLastError()); break; case 2: RUN(unsigned int, 2, real); THCudaCheck(hipGetLastError()); break; case 3: RUN(unsigned int, 3, real); THCudaCheck(hipGetLastError()); break; default: RUN(unsigned int, -1, real); THCudaCheck(hipGetLastError()); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); THCudaCheck(hipGetLastError()); } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_scatterKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatter)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimension)(state, src) == THCTensor_(nDimension)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { int64_t indexSizeD = THCudaLongTensor_size(state, index, d); if (d != dim) { THArgCheck(indexSizeD <= THCTensor_(size)(state, tensor, d), 3, "Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s", dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str); } THArgCheck(indexSizeD <= THCTensor_(size)(state, src, d), 3, "Index tensor must not have larger size than input tensor, but got index %s input %s", THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str); } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_scatterAddKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatterAdd)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimension)(state, src) == THCTensor_(nDimension)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCTensor_(size)(state, src, d), 4, "Input tensor must have same size as output tensor apart from the specified dimension"); } int64_t indexSizeD = THCudaLongTensor_size(state, index, d); if (d != dim) { THArgCheck(indexSizeD <= THCTensor_(size)(state, tensor, d), 3, "Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s", dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str); } THArgCheck(indexSizeD <= THCTensor_(size)(state, src, d), 3, "Index tensor must not have larger size than input tensor, but got index %s input %s", THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str); } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_scatterFillKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, indexInfo, value, dim, (TYPE)totalElements); void THCTensor_(scatterFill)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, tensor), 3, "Index tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCudaLongTensor_size(state, index, d), 4, "Index tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #endif
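// Illustrative sketch (editorial, not part of the generated file above): the RUN macro in
// this hipified file is what hipify emits for CUDA's <<<grid, block, shmem, stream>>> launch
// syntax; the original CUDA form of the same file follows under the next filename. A minimal,
// self-contained example of that translation, using hypothetical names (toyScaleKernel and
// launchToyScale are not THC symbols):
#include <hip/hip_runtime.h>

__global__ void toyScaleKernel(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;  // kernel bodies are unchanged between CUDA and HIP
}

void launchToyScale(float* d_data, float factor, int n, hipStream_t stream) {
  const int block = 256;
  const int grid = (n + block - 1) / block;
  // CUDA spelling:  toyScaleKernel<<<grid, block, 0, stream>>>(d_data, factor, n);
  // HIP spelling produced by hipify:
  hipLaunchKernelGGL(toyScaleKernel, dim3(grid), dim3(block), 0, stream,
                     d_data, factor, n);
}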
288f19b959e9253ecbadfea15b7989ac61e5aa26.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorScatterGather.cu" #else #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_gatherKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(gather)(THCState* state, THCTensor *tensor, THCTensor *src, int dim, THCudaLongTensor *index) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, src), 4, "Index tensor must have same dimensions as input tensor"); THLongStorage *indexSize = THCudaLongTensor_newSizeOf(state, index); THArgCheck(THCTensor_(isSize)(state, tensor, indexSize), 4, "Index tensor must have the same size as output tensor."); THLongStorage_free(indexSize); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 3, "Index dimension is out of bounds"); THArgCheck(THCTensor_(nDimension)(state, src) == THCTensor_(nDimension)(state, tensor), 2, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCTensor_(size)(state, src, d), 2, "Input tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); THCudaCheck(cudaGetLastError()); break; case 2: RUN(unsigned int, 2, real); THCudaCheck(cudaGetLastError()); break; case 3: RUN(unsigned int, 3, real); THCudaCheck(cudaGetLastError()); break; default: RUN(unsigned int, -1, real); THCudaCheck(cudaGetLastError()); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); THCudaCheck(cudaGetLastError()); } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_scatterKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatter)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimension)(state, src) == THCTensor_(nDimension)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { int64_t indexSizeD = THCudaLongTensor_size(state, index, d); if (d != dim) { THArgCheck(indexSizeD <= THCTensor_(size)(state, tensor, d), 3, "Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s", dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str); } THArgCheck(indexSizeD <= THCTensor_(size)(state, src, d), 3, "Index tensor must not have larger size than input tensor, but got index %s input %s", THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str); } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_scatterAddKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatterAdd)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimension)(state, src) == THCTensor_(nDimension)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCTensor_(size)(state, src, d), 4, "Input tensor must have same size as output tensor apart from the specified dimension"); } int64_t indexSizeD = THCudaLongTensor_size(state, index, d); if (d != dim) { THArgCheck(indexSizeD <= THCTensor_(size)(state, tensor, d), 3, "Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s", dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str); } THArgCheck(indexSizeD <= THCTensor_(size)(state, src, d), 3, "Index tensor must not have larger size than input tensor, but got index %s input %s", THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str); } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_scatterFillKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, indexInfo, value, dim, (TYPE)totalElements); void THCTensor_(scatterFill)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, tensor), 3, "Index tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCudaLongTensor_size(state, index, d), 4, "Index tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #endif
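// Illustrative sketch (editorial, not part of the file above): the gather kernels launched by
// RUN specialize their index arithmetic over TensorInfo dims and strides, but the operation
// itself is out[i][j][k] = src[i][ index[i][j][k] ][k] when gathering along dim 1. A naive
// standalone kernel for contiguous 3-D tensors makes that arithmetic explicit
// (naiveGather3DDim1 is a hypothetical name, not a THC kernel):
__global__ void naiveGather3DDim1(float* out, const float* src, const long long* index,
                                  int d0, int d1_out, int d1_src, int d2) {
  int linear = blockIdx.x * blockDim.x + threadIdx.x;
  int total = d0 * d1_out * d2;
  if (linear >= total) return;
  int k = linear % d2;
  int j = (linear / d2) % d1_out;
  int i = linear / (d2 * d1_out);
  long long g = index[(i * d1_out + j) * d2 + k];        // gathered coordinate along dim 1
  out[(i * d1_out + j) * d2 + k] = src[(i * d1_src + g) * d2 + k];
}
// The kernels above instead resolve offsets through the TensorInfo strides, so one template
// covers non-contiguous layouts and up to MAX_CUTORCH_DIMS dimensions.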
5d9d3ad9ee93935ead665ed38222f486cfbc6ace.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
          const Dtype* prob_data, const Dtype* label, Dtype* loss,
          const int num, const int dim, const int spatial_dim,
          const bool has_ignore_label_, const int ignore_label_,
          Dtype* counts) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    //printf("label_value: %d\n", label_value);
    if (has_ignore_label_ && label_value == ignore_label_) {
      loss[index] = 0;
      counts[index] = 0;
    } else {
      loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
                             Dtype(FLT_MIN)));
      counts[index] = 1;
    }
  }
}

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  //this->Forward_cpu(bottom, top);
#if 1
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.gpu_data();
  const Dtype* label = bottom[1]->gpu_data();
  const int dim = prob_.count() / outer_num_;
  const int nthreads = outer_num_ * inner_num_;
  // Since this memory is not used for anything until it is overwritten
  // on the backward pass, we use it here to avoid having to allocate new GPU
  // memory to accumulate intermediate results in the kernel.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  // Similarly, this memory is never used elsewhere, and thus we can use it
  // to avoid having to allocate additional GPU memory.
  Dtype* counts = prob_.mutable_gpu_diff();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      nthreads, prob_data, label, loss_data,
      outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
  Dtype loss;
  caffe_gpu_asum(nthreads, loss_data, &loss);
  Dtype valid_count = -1;
  // Only launch another CUDA kernel if we actually need the count of valid
  // outputs.
  if (normalization_ == LossParameter_NormalizationMode_VALID &&
      has_ignore_label_) {
    caffe_gpu_asum(nthreads, counts, &valid_count);
  }
  Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
      normalization_, outer_num_, inner_num_, valid_count);
  top[0]->mutable_cpu_data()[0] = loss / normalizer;
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
#endif
}

template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
          const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
          const int spatial_dim, const bool has_ignore_label_,
          const int ignore_label_, Dtype* counts) {
  const int channels = dim / spatial_dim;
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      for (int c = 0; c < channels; ++c) {
        bottom_diff[n * dim + c * spatial_dim + s] = 0;
      }
      counts[index] = 0;
    } else {
      bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
      counts[index] = 1;
    }
  }
}

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const Dtype* prob_data = prob_.gpu_data();
    const Dtype* top_data = top[0]->gpu_data();
    caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
    const Dtype* label = bottom[1]->gpu_data();
    const int dim = prob_.count() / outer_num_;
    const int nthreads = outer_num_ * inner_num_;
    // Since this memory is never used for anything else,
    // we use it to avoid allocating new GPU memory.
    Dtype* counts = prob_.mutable_gpu_diff();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
        dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        nthreads, top_data, label, bottom_diff,
        outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
    Dtype valid_count = -1;
    // Only launch another CUDA kernel if we actually need the count of valid
    // outputs.
    if (normalization_ == LossParameter_NormalizationMode_VALID &&
        has_ignore_label_) {
      caffe_gpu_asum(nthreads, counts, &valid_count);
    }
    Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
        normalization_, outer_num_, inner_num_, valid_count);
    const Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer;
    caffe_gpu_scal(prob_.count(), loss_weight, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);

}  // namespace caffe
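// Illustrative sketch (editorial, not from Caffe's sources): a host-side reference of what the
// forward pass above accumulates. Each (n, s) position contributes -log(max(prob[label], FLT_MIN))
// and a count of 1, unless its label equals ignore_label_, in which case it contributes 0 to both.
// referenceSoftmaxLoss is a hypothetical helper, shown only to make the normalization explicit.
#include <algorithm>
#include <cfloat>
#include <cmath>

double referenceSoftmaxLoss(const float* prob,   // [num, channels, spatial], softmax outputs
                            const int* label,    // [num, spatial]
                            int num, int channels, int spatial,
                            bool has_ignore, int ignore_label) {
  double loss = 0.0, valid = 0.0;
  for (int n = 0; n < num; ++n) {
    for (int s = 0; s < spatial; ++s) {
      int lv = label[n * spatial + s];
      if (has_ignore && lv == ignore_label) continue;        // 0 loss, 0 count
      float p = prob[(n * channels + lv) * spatial + s];
      loss += -std::log(std::max(p, (float)FLT_MIN));
      valid += 1.0;
    }
  }
  // With VALID normalization, the kernel's counts[] sum plays the role of "valid" here;
  // the other modes divide by the full count, the batch size, or 1 via GetNormalizer().
  return valid > 0.0 ? loss / valid : 0.0;
}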
5d9d3ad9ee93935ead665ed38222f486cfbc6ace.cu
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
          const Dtype* prob_data, const Dtype* label, Dtype* loss,
          const int num, const int dim, const int spatial_dim,
          const bool has_ignore_label_, const int ignore_label_,
          Dtype* counts) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    //printf("label_value: %d\n", label_value);
    if (has_ignore_label_ && label_value == ignore_label_) {
      loss[index] = 0;
      counts[index] = 0;
    } else {
      loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
                             Dtype(FLT_MIN)));
      counts[index] = 1;
    }
  }
}

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  //this->Forward_cpu(bottom, top);
#if 1
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.gpu_data();
  const Dtype* label = bottom[1]->gpu_data();
  const int dim = prob_.count() / outer_num_;
  const int nthreads = outer_num_ * inner_num_;
  // Since this memory is not used for anything until it is overwritten
  // on the backward pass, we use it here to avoid having to allocate new GPU
  // memory to accumulate intermediate results in the kernel.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  // Similarly, this memory is never used elsewhere, and thus we can use it
  // to avoid having to allocate additional GPU memory.
  Dtype* counts = prob_.mutable_gpu_diff();
  // NOLINT_NEXT_LINE(whitespace/operators)
  SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
      CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
      outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
  Dtype loss;
  caffe_gpu_asum(nthreads, loss_data, &loss);
  Dtype valid_count = -1;
  // Only launch another CUDA kernel if we actually need the count of valid
  // outputs.
  if (normalization_ == LossParameter_NormalizationMode_VALID &&
      has_ignore_label_) {
    caffe_gpu_asum(nthreads, counts, &valid_count);
  }
  Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
      normalization_, outer_num_, inner_num_, valid_count);
  top[0]->mutable_cpu_data()[0] = loss / normalizer;
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
#endif
}

template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
          const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
          const int spatial_dim, const bool has_ignore_label_,
          const int ignore_label_, Dtype* counts) {
  const int channels = dim / spatial_dim;
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      for (int c = 0; c < channels; ++c) {
        bottom_diff[n * dim + c * spatial_dim + s] = 0;
      }
      counts[index] = 0;
    } else {
      bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
      counts[index] = 1;
    }
  }
}

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const Dtype* prob_data = prob_.gpu_data();
    const Dtype* top_data = top[0]->gpu_data();
    caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
    const Dtype* label = bottom[1]->gpu_data();
    const int dim = prob_.count() / outer_num_;
    const int nthreads = outer_num_ * inner_num_;
    // Since this memory is never used for anything else,
    // we use it to avoid allocating new GPU memory.
    Dtype* counts = prob_.mutable_gpu_diff();
    // NOLINT_NEXT_LINE(whitespace/operators)
    SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
        CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
        outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
    Dtype valid_count = -1;
    // Only launch another CUDA kernel if we actually need the count of valid
    // outputs.
    if (normalization_ == LossParameter_NormalizationMode_VALID &&
        has_ignore_label_) {
      caffe_gpu_asum(nthreads, counts, &valid_count);
    }
    Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
        normalization_, outer_num_, inner_num_, valid_count);
    const Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer;
    caffe_gpu_scal(prob_.count(), loss_weight, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);

}  // namespace caffe
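// Illustrative sketch (editorial, not from Caffe's sources): the gradient the backward pass above
// realizes. bottom_diff starts as a copy of prob (the caffe_gpu_memcpy call), the kernel subtracts
// 1 at the labelled channel and zeroes ignored positions, and caffe_gpu_scal then applies
// loss_weight / normalizer. referenceSoftmaxLossGrad is a hypothetical helper.
void referenceSoftmaxLossGrad(float* diff,        // pre-initialized to prob, [num, channels, spatial]
                              const int* label,   // [num, spatial]
                              int num, int channels, int spatial,
                              bool has_ignore, int ignore_label, float scale) {
  for (int n = 0; n < num; ++n) {
    for (int s = 0; s < spatial; ++s) {
      int lv = label[n * spatial + s];
      if (has_ignore && lv == ignore_label) {
        for (int c = 0; c < channels; ++c)
          diff[(n * channels + c) * spatial + s] = 0.0f;     // no gradient for ignored labels
      } else {
        diff[(n * channels + lv) * spatial + s] -= 1.0f;     // d/dz of -log softmax: p - one_hot
      }
    }
  }
  const long long count = (long long)num * channels * spatial;
  for (long long i = 0; i < count; ++i) diff[i] *= scale;    // scale = loss_weight / normalizer
}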
0b2822c84e252b1caca55fb6fc3de07491f5b0bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // cudamatrix/cu-kernels.cu // Copyright 2009-2012 Karel Vesely // 2013 Ehsan Variani // 2013 Johns Hopkins University (author: Daniel Povey) // 2013 Hainan Xu // 2013 Xiaohui Zhang // 2013 Johns Hopkins University (author: Guoguo Chen) // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. // In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers #include <cfloat> #include "cu-kernels-ansi.h" /*********************************************************************** * Generic __device__ functions */ template<typename Real> __device__ static Real _sum_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (sum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x >= halfPoint) { // was < // Get the shared value stored by another thread Real temp = 0.0; if(threadIdx.x < nTotalThreads) { // was +halfPoint temp = buffer[threadIdx.x]; // was +halfPoint } buffer[threadIdx.x - halfPoint] += temp; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } template<typename Real> __device__ static Real _min_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (min) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp < buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two } // the result return buffer[0]; } template<typename Real> __device__ static Real _max_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (max) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. 
} // the result return buffer[0]; } template<typename Real> __device__ static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (get index of maximum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread Real temp = -1e20; if(threadIdx.x+halfPoint < nTotalThreads) { temp = val[idx[threadIdx.x + halfPoint]]; } if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return idx[0]; } /*********************************************************************** * CUDA kernels * the functions are templated to have the float/double operations */ /* * CuMatrix */ template<typename Real> __global__ static void _copy_low_upp(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i <= j || i >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } template<typename Real> __global__ static void _copy_upp_low(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j <= i || j >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } // mat += diag(vec) * mat2. template<typename Real> __global__ static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim, const Real *vec, const Real *mat2, int mat2_row_stride, int mat2_col_stride, Real beta) { // Note from Dan: in this kernel, we make the x dimension correspond to the // row index and y to the column index. That was not always the case for // earlier kernels written by others. int i = blockIdx.y * blockDim.y + threadIdx.y; // row index int j = blockIdx.x * blockDim.x + threadIdx.x; // column index int index = i * mat_dim.stride + j, index2 = i * mat2_row_stride + j * mat2_col_stride; if (i < mat_dim.rows && j < mat_dim.cols) { mat[index] = alpha * vec[i] * mat2[index2] + beta * mat[index]; } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (i * (i+1) / 2) + j; int32_cuda index_A = i * dmat.stride + j; if (j <= i) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // transpose the indices used to index the source TpMatrix. if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (j * (j+1) / 2) + i; int32_cuda index_A = i * dmat.stride + j; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } // for this kernel, following the newer pattern, the x-dim is the row-index, the // y-dim is the col-index. 
template<typename Real, typename OtherReal> __global__ static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index. int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = j + i * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } // for this kernel, the x-dim is the row-index at the output, the y-dim is the // col-index at the output template<typename Real, typename OtherReal> __global__ static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index out int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index out int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = i + j * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } template<typename Real> __global__ static void _transpose_matrix(Real* mat, MatrixDim d) { // Transposes a square matrix in-place. int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j >= i || i >= d.rows) { return; } // Only half the threads act. int32_cuda index_a = j + i * d.stride, index_b = i + j * d.stride; Real a = mat[index_a], b = mat[index_b]; mat[index_a] = b; mat[index_b] = a; } template<typename Real> __global__ static void _copy_col_from_vec(Real* mat, const Real* v, int col, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < d.rows ) { int32_cuda index = col + i * d.stride; mat[index] = v[i]; } } template<typename Real> __global__ static void _apply_exp(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if ( i < d.cols && j < d.rows ) { mat[index] = exp(mat[index]); } } template<typename Real> __global__ static void _scale_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value * mat[index]; } } template<typename Real> __global__ static void _set_diag(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = i + i*d.stride; if ( i < d.rows && i < d.cols) { mat[index] = value; } } template<typename Real> __global__ static void _set_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value; } } template<typename Real> __global__ static void _add_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = mat[index] + value; } } template<typename Real> __global__ static void _set_const(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = value; } template<typename Real> __global__ static void _set_zero_above_diag(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + 
threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < i) mat[index] = 0.0; } template<typename Real> __global__ static void _add(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] + value; } template<typename Real> __global__ static void _scale(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] * value; } template<typename Real> __global__ static void _apply_log(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = log(mat[index]); } template<typename Real> __global__ static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] * A[src_index]; } template<typename Real> __global__ static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if ( i < dst_d.cols && j < dst_d.rows ) { Real a = mat[dst_index], b = A[src_index]; mat[dst_index] = (a > b ? a : b); } } template<typename Real> __global__ static void _vec_mul_elements(Real* v, const Real* a, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) v[i] = v[i] * a[i]; } template<typename Real> __global__ static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[i]; } template<typename Real> __global__ static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[j]; } template<typename Real> __global__ static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride; int src_index = i / group_size + j * src_stride; y[dst_index] *= x[src_index]; } } /// y is the derivative we will output; vec is the input we're computing /// the group p-norm on, "norm" is the previously computed group p-norm. 
template<typename Real> __global__ static void _calc_pnorm_deriv(Real *deriv, const Real *vec, const Real *norm, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. norm_element = norm[src_index]; // this is the pnorm Real vec_element_sign = (vec_element > 0 ? 1 : -1); Real ans; if (norm_element <= 0.0) ans = 0.0; // The derivative is either zero or undefined at the origin. else ans = vec_element_sign * pow(std::abs(vec_element), power - 1) * pow(norm_element, 1 - power); deriv[dst_index] = ans; } } /// Set each element to y = (x == orig ? changed : x). template<typename Real> __global__ static void _replace_value(Real *vec, int dim, Real orig, Real changed) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) if (vec[i] == orig) vec[i] = changed; } template<typename Real> __global__ static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (j >= d.rows ) return; //invert divider in shared memory __shared__ Real inv[16]; if(threadIdx.x==0) { inv[threadIdx.y] = 1.0/vec_div[j]; } __syncthreads(); //multiply elements if (i < d.cols && j < d.rows) mat[index] *= inv[threadIdx.y]; } template<typename Real> __global__ static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; int32_cuda index_src = i + j*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j *d.stride; int32_cuda index_src = j + i*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_mat_div_mat(const Real* A, const Real* B, const Real* C, Real* dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, a_index = i + j*stride_a, b_index = i + j*stride_b, c_index = i + j*stride_c; if (i < d.cols && j < d.rows) if (C[c_index] == 0) dst[index] = A[a_index]; else dst[index] = A[a_index] * B[b_index] / C[c_index]; } // Given a matrix input S (not packed!) and a lower-triangular matrix L, // this function does S = beta S + alpha * L^T L. This is used in PSD matrix inversion. // The i index is the row of the destination S and the j the column (although of // course the output is symmetric so it doesn't matter in a sense). The main point // of this is to make use of various symmetries and zero-ness. 
template<typename Real> __global__ static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim, Real *S, MatrixDim sdim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= sdim.rows || j > i) return; // this thread computes the dot-product of the i'th column of // L with the j'th column of L. The values we're multiplying // are only nonzero for row-index k greater or equal to // max(i, j), which equals i. Real sum = 0.0; for (int k = i; k < sdim.rows; k++) { int i_index = i + tdim.stride * k, j_index = j + tdim.stride * k; sum += T[i_index] * T[j_index]; } int output_index1 = i * sdim.stride + j, output_index2 = j * sdim.stride + i; S[output_index1] = alpha * sum + beta * S[output_index1]; S[output_index2] = alpha * sum + beta * S[output_index2]; } template<typename Real> __global__ static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*col[j] + beta*dst[index]; } template<typename Real> __global__ static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*row[i] + beta*dst[index]; } template<typename Real> __global__ static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*dmat.stride; int32_cuda index2 = i + j*dmask.stride; if ( i < dmat.cols && j < dmat.rows ) if(mask[index2] == 0) mat[index] = 0; } template<typename Real> __global__ static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim, const Real *mat2, int mat2_row_stride, int mat2_col_stride, const Real *vec, Real beta) { // Note from Dan: in this kernel, we make the x dimension correspond to the // row index and y to the column index. That was not always the case for // earlier kernels written by others. int i = blockIdx.x * blockDim.x + threadIdx.x; // row index int j = blockIdx.y * blockDim.y + threadIdx.y; // column index int index = i * mat_dim.stride + j, index2 = i * mat2_row_stride + j * mat2_col_stride; if (i < mat_dim.rows && j < mat_dim.cols) { mat[index] = alpha * mat2[index2] * vec[j] + beta * mat[index]; } } template<typename Real> __global__ static void _add_mat_mat_elements(Real *data, const Real *srcA_data, const Real *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda tgt_index = i + j*dim.stride; int32_cuda srcA_index = i + j*srcA_stride; int32_cuda srcB_index = i + j*srcB_stride; if (i < dim.cols && j < dim.rows) { data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index] + beta * data[tgt_index] ; } } /* * CuVector */ // very limited application! 
template<typename Real> __global__ static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2, Real param_3, int* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { Real ratio = a[i] / param_3; if ( ( ratio < 0.0 ) || ( ratio >= 1.01 )) { *flag = 1; return; } if ( ratio < param_1 ) { Real factor = ((param_1/ratio) > param_2) ? param_2 : (param_1/ratio); v[i] = v[i] / factor; } else if ( ratio > param_1 ) { Real factor = ((ratio/param_1) > param_2) ? param_2 : (ratio/param_1); v[i] = v[i] * factor; } } } template<typename Real> __global__ static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v_out[i] = (double) v_in[i]; } } // This kernel writes a copy of the vector "v_in" to each row of the matrix // "m_out". the dimension of v_in should be equal to the #columns of m_out. In // this kernel, following the new pattern, x corresponds to row-index and y to // column-index. template<typename Real> __global__ static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row index. int j = blockIdx.y * blockDim.y + threadIdx.y; // column index. if (i < d.rows && j < d.cols) { int index = i * d.stride + j; m_out[index] = v_in[j]; } } template<typename Real> __global__ static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if ( i < dim) { v_out[i] = (float) v_in[i]; } } template<typename Real> __global__ static void _vec_min(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= CU1DBLOCK) return; __shared__ Real row_data[CU1DBLOCK]; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real min = 1.0 / 0.0; // infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j < min) min = v_j; } row_data[i] = min; __syncthreads(); //get the sum *value = _min_reduce(row_data); } template<typename Real> __global__ static void _vec_max(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.y > 0) return; __shared__ Real row_data[CU1DBLOCK]; if(i >= CU1DBLOCK) return; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real max = -1.0 / 0.0; // -infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j > max) max = v_j; } row_data[i] = max; __syncthreads(); //get the sum *value = _max_reduce(row_data); } // _trace_mat_mat expects to be called with 1 blocks, each of dimension // CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x], // i.e. value[0 through 0]. 
template<typename Real, int num_blocks> __global__ static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return; int num_elements = dA.rows * dA.cols, num_threads = CU1DBLOCK * num_blocks; int block_size = (num_elements + num_threads - 1) / num_threads; int loop_start = i * block_size, loop_end = (i + 1) * block_size; if (loop_end > num_elements) loop_end = num_elements; Real sum = 0.0; for (int j = loop_start; j < loop_end; j++) { // for (int j = i; j < num_elements; j += num_threads) { int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is // col-index in A; in B, it's reversed. int index_A = col + row * dA.stride, index_B = row + col * B_stride; sum += A[index_A] * B[index_B]; } __shared__ Real row_data[CU1DBLOCK]; row_data[threadIdx.x] = sum; __syncthreads(); Real ans = _sum_reduce(row_data); if (threadIdx.x == 0) value[blockIdx.x] = ans; } // _trace_mat_mat_trans expects to be called with 4 blocks, each of dimension // CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x], // i.e. value[0 through 3]. template<typename Real, int num_blocks> __global__ static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return; int num_elements = dA.rows * dA.cols, num_threads = CU1DBLOCK * num_blocks; // int block_size = (num_elements + num_threads - 1) / num_threads; // int loop_start = i * block_size, loop_end = (i + 1) * block_size; // if (loop_end > num_elements) // loop_end = num_elements; Real sum = 0.0; // for (int j = loop_start; j < loop_end; j++) { for (int j = i; j < num_elements; j += num_threads) { int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is // col-index in A; in B, it's reversed. int index_A = col + row * dA.stride, index_B = col + row * B_stride; sum += A[index_A] * B[index_B]; } __shared__ Real row_data[CU1DBLOCK]; row_data[threadIdx.x] = sum; __syncthreads(); Real ans = _sum_reduce(row_data); if (threadIdx.x == 0) value[blockIdx.x] = ans; } // Adds diag(M N) to v, where M and N are matrices. We supply row_stride and // col_stride arguments for M and N, and swapping them allows us to transpose // those matrices. Note: we imagine row-major indexing here, just like Kaldi // and CBLAS (but unlike CUBLAS). // This kernel expects the blockDim to be (CU1DBLOCK, 1) and the // gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element. // threads_per_element should be a power of 2. template<typename Real> __global__ static void _add_diag_mat_mat( Real alpha, Real* v, int v_dim, const Real* M, int M_cols, int M_row_stride, int M_col_stride, const Real *N, int N_row_stride, int N_col_stride, int threads_per_element, Real beta) { // we actually assume blockDim.x == CU1DBLOCK here. // Each diagonal element of v is processed by "threads_per_element" threads. __shared__ Real temp_data[CU1DBLOCK]; int i = blockIdx.x * blockDim.x + threadIdx.x; int v_idx = i / threads_per_element, // v_idx is the index into v that we are supposed to sub_idx = i % threads_per_element; // add to; 0 <= sub_idx < threads_per_element tells // us which block of elements we sum up. 
if (v_idx >= v_dim) return; Real sum = 0.0; for (int j = sub_idx; j < M_cols; j += threads_per_element) { int M_index = v_idx * M_row_stride + j * M_col_stride, N_index = j * N_row_stride + v_idx * N_col_stride; sum += M[M_index] * N[N_index]; } temp_data[threadIdx.x] = sum; // start_idx = threadIdx.x - sub_idx; // start of the position in temp_data // that we want to sum up. // The following is a tree-based reduction of the elements of temp_data from // start_idx to start_idx + threads_per_element - 1; our own index is "sub_idx". __syncthreads(); int num_total_threads = threads_per_element; while (num_total_threads > 1) { int half_point = ((1 + num_total_threads) >> 1); if (sub_idx < half_point) { Real temp = 0.0; if (sub_idx + half_point < num_total_threads) { temp = temp_data[threadIdx.x + half_point]; } temp_data[threadIdx.x] += temp; } __syncthreads(); num_total_threads = half_point; } if (sub_idx == 0) { v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x]; } } template<typename Real> __global__ static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) v[i] = alpha * x[i] * y[i] + beta * v[i]; } template<typename Real> __global__ static void _copy_col_from_mat(Real* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = mat[index]; } template<typename Real> __global__ static void _copy_col_from_mat_df(double* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (double) mat[index]; } template<typename Real> __global__ static void _copy_col_from_mat_fd(float* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (float) mat[index]; } template<typename Real> __global__ static void _vec_apply_exp(Real* v, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v[i] = exp(v[i]); } } template<typename Real> __global__ static void _vec_apply_log(Real* v, Real* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { if (v[i] < 0) { *flag = 1; return; } v[i] = log(v[i]); } } template<typename Real> __global__ static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z, MatrixDim d, Real* z2, MatrixDim d2, Real* t) { int i = threadIdx.x; __shared__ Real tot_objf[CU1DBLOCK]; __shared__ Real tot_weight[CU1DBLOCK]; Real tmp_weight_sum = 0; Real tmp_tot_objf = 0; int size = s / CU1DBLOCK; //the least size in a loop (later part) int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i*size; loop_end = threshold + (i+1)*size; } for(int j = loop_start; j< loop_end; j++) { int m = (x + j)->row; //* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) ); int label = (x + j)->column; //*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int)); Real weight = (x + j)->weight; //*(Real*) ((size_t)x + j * (2 * 
sizeof(int) + sizeof(Real) ) + 2 * sizeof(int)); tmp_weight_sum += weight; Real this_prob = *(z + m * d.stride + label); tmp_tot_objf += weight * log(this_prob); *(z2 + m * d2.stride + label ) += weight / this_prob;// there might be problems here.... } tot_objf[i] = tmp_tot_objf; tot_weight[i] = tmp_weight_sum; __syncthreads(); *t = _sum_reduce(tot_objf); __syncthreads(); *(t+1) = _sum_reduce(tot_weight); return; } template<typename Real> __global__ static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha, MatrixElement<Real>* x, int s) { int i = threadIdx.x; if (i >= s) return; int size = s / CU1DBLOCK; //the least size in a loop (later part) int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i*size; loop_end = threshold + (i+1)*size; } for(int j = loop_start; j < loop_end; j++) { *(data + x[j].row * dim.stride + x[j].column) += alpha * x[j].weight; } } template<typename Real> __global__ static void _matrix_lookup(const Real *data, MatrixDim dim, const Int32Pair *indices, int indices_size, Real *output) { int ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= indices_size) return; int data_ind = indices[ind].first * dim.stride + indices[ind].second; output[ind] = data[data_ind]; } template<typename Real> __global__ static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; //col int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; //row int32_cuda index_mat1 = i + j*mat1_dim.stride; int32_cuda index_mat2 = i + j*mat2_stride; int32_cuda index_mask = i + j*mask_stride; if (i < mat1_dim.cols && j < mat1_dim.rows) mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 1.0 : 0.0); } template<typename Real> __global__ static void _vec_sum(Real *v, Real *sum, int dim, int inc) { int i = threadIdx.x; __shared__ Real row_data[CU1DBLOCK]; if (i >= CU1DBLOCK) return; Real tmp_sum = 0; int size = dim / CU1DBLOCK; //the least size in a loop (later part) int threshold = dim - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i * size; loop_end = threshold + (i+1) * size; } for(int j = loop_start; j< loop_end; j++) { tmp_sum += v[j * inc]; } row_data[threadIdx.x] = tmp_sum; __syncthreads(); *sum = _sum_reduce(row_data); } template<typename Real> __global__ static void _pvec_sum(Real* v, Real* g, int dim, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int start = size * i; if (start >= dim) return; int end = start + size; if (end > dim) end = dim; __shared__ Real row_data[CU1DBLOCK]; Real sum = 0; for (int j = start; j < end; j++) sum += v[j]; row_data[threadIdx.x] = sum; __syncthreads(); g[blockIdx.x] = _sum_reduce(row_data); } template<typename Real> __global__ static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] < floor_val) { v[i] = floor_val; count[i] = 1; } else { count[i] = 0; } } } // Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index. // this is for no reason, really, I just happened to prefer this // at the time. 
[dan] template<typename Real> __global__ static void _apply_pow(Real* mat, Real power, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { if (power == 1.0) return; if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { if (!(mat[index] >= 0.0)) return; mat[index] = sqrt(mat[index]); } else { mat[index] = pow(mat[index], power); } } } template<typename Real> __global__ static void _apply_pow_abs(Real* mat, Real power, bool include_sign, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { if (include_sign == true && mat[index] < 0) { if (power == 1.0) mat[index] = -std::abs(mat[index]); if (power == 2.0) { mat[index] = -mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = -sqrt(std::abs(mat[index])); } else { mat[index] = -pow(std::abs(mat[index]), power); } } else { if (power == 1.0) mat[index] = std::abs(mat[index]); if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = sqrt(std::abs(mat[index])); } else if (power < 0.0 && mat[index] == 0.0) { mat[index] = 0.0; } else { mat[index] = pow(std::abs(mat[index]), power); } } } } // Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index. // this is for no reason, really, I just happened to prefer this // at the time. [dan] template<typename Real> __global__ static void _apply_heaviside(Real* mat, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { mat[index] = (mat[index] > 0.0 ? 1.0 : 0.0); } } template<typename Real> __global__ static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (mat[index] < floor_val) mat[index] = floor_val; } } template<typename Real> __global__ static void _copy_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { // Note: in this kernel, the x dimension corresponds to rows and the y to columns, // as it will be going forward. int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int index = reorder[j], dst_index = i * dst_dim.stride + j; if (index >= 0) { int src_index = i * src_stride + reorder[j]; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0.0; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { // Note: in this kernel, the x dimension corresponds to rows and the y to columns, // as it will be going forward. 
int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int index = reorder[i], dst_index = i * dst_dim.stride + j; if (index >= 0) { int src_index = reorder[i] * src_stride + j; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows ) { if (mat[index] > ceiling_val) mat[index] = ceiling_val; } } template<typename Real> __global__ static void _invert_elements(Real* data, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j*d.stride; if (i < d.cols && j < d.rows) data[index] = 1.0/data[index]; } // matrix-wise, do data = alpha * data + beta * A * B^T, // where B is a block matrix. template<typename Real> __global__ static void _add_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } template<typename Real> __global__ static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[j]; int B_row_start = block_data.row_offset, B_col_start = block_data.col_offset, B_num_rows = block_data.matrix_dim.rows, B_num_cols = block_data.matrix_dim.cols, B_row_stride = block_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(block_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < B_num_cols; k++) { const Real *this_B_col = B_data + k; const Real *this_A_row = A_data + i * A_row_stride + B_row_start * A_col_stride; // this_A_row points to the element A[i][B_row_start], it's really just // part of this row of A. 
Real sum = 0.0; for (int l = 0; l < B_num_rows; l++) // l indexes rows of B. sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + B_col_start); data[index] = alpha * sum + beta * data[index]; } } // For a block matrix B, does B = alpha * C * D + beta * B. // the (x,y,z) indices are the block index, then the row // and column indices within the block. Note: transposition of C and D // is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride), // so it's invisible to this code. The num-cols and num-rows of C and D // are only provided to the extent that they are not already determined // by other quantities. template<typename Real> __global__ static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks, const Real *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const Real *D_data, int D_row_stride, int D_col_stride, Real alpha, Real beta) { int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B. int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block if (b >= num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[b]; if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols) return; // we're outside the dimensions of the b'th block. // B_elem is the element of B we're writing to. Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data) + i * block_data.matrix_dim.stride + j; Real B_val = *B_elem; // B_row and B_col are the (row, col) index into the full matrix B. int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j; const Real *C_row_data = C_data + C_row_stride * B_row, *D_col_data = D_data + D_col_stride * B_col; Real sum = 0.0; for (int k = 0; k < C_num_cols; k++) { sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride]; } *B_elem = alpha * sum + beta * B_val; } // Since this is a newer kernel, x is the row-index and y is the // column-index.
template<typename Real> __global__ static void _sum_column_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indices) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col, src_start_index = row * src_dim.stride + indices[col].first, src_end_index = row * src_dim.stride + indices[col].second; Real sum = 0.0; for (int index = src_start_index; index < src_end_index; index++) sum += src_data[index]; data[dst_index] = sum; } template<typename Real> __global__ static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; // compute the function y[index] = log(1 + exp(x[index])) if(i < d.cols && j < d.rows) { Real val = x[src_index], result; if (val >= 10.0) result = val; // function approaches y=x as x gets large else result = log1p(exp(val)); y[dst_index] = result; } } template<typename Real> __global__ static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; Real tmp = 0; int src_begin_index = i * group_size + j * src_stride; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { tmp += pow(std::abs(x[src_index]), power); } tmp = pow(tmp, Real(1.0 / power)); if (!isnan(tmp)) { y[dst_index] = tmp; } else { Real max_value = x[src_begin_index], min_value = max_value; for (int src_index = src_begin_index + 1; src_index < src_end_index; src_index ++) { if (x[src_index] > max_value) max_value = x[src_index]; if (x[src_index] < min_value) min_value = x[src_index]; } tmp = 0.0; Real max_abs_value = (max_value > -min_value ? 
max_value : -min_value); // let max_value be the // largest abs(value) if (max_abs_value == 0) { y[dst_index] = 0.0; } else { for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { Real x_scaled = x[src_index] / max_abs_value; tmp += pow(std::abs(x_scaled), Real(power)); } y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value; } } } } /* * cu:: */ template<typename Real> __global__ static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; if(i < d.cols && j < d.rows) { Real res = 1.0 / (1.0 + exp(-x[src_index])); y[dst_index] = res; } } template<typename Real> __global__ static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = y[y_index]*(1.0-y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j * src_stride; if(i < d.cols && j < d.rows) { Real exp_2x = exp(2.0*x[src_index]); Real res; if(isinf(exp_2x)) { res = 1.0; } else { res = (exp_2x - 1.0) / (exp_2x + 1.0); } y[dst_index] = res; } } template<typename Real> __global__ static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = (1.0 - y[y_index]*y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; //copy input to aux aux[threadIdx.x] = x[threadIdx.x+j*d.stride]; for(int i=1; i<steps; ++i) { if(threadIdx.x+i*THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride]) aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride]; } //get the maximum value int nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x+halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real max = aux[0]; __syncthreads(); // subtract max, apply exp, sum up... 
y[threadIdx.x+j*d.stride] = exp(x[threadIdx.x+j*d.stride] - max); aux[threadIdx.x] = y[threadIdx.x+j*d.stride]; for(int i=1; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = exp(x[threadIdx.x+i*THREADS+j*d.stride] - max); aux[threadIdx.x] += y[threadIdx.x+i*THREADS+j*d.stride]; } } nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real sum = aux[0]; __syncthreads(); //normalize by sum... for(int i=0; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = y[threadIdx.x+i*THREADS+j*d.stride] / sum; } } } template<typename Real> __global__ static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = i % d_in.cols; int32_cuda src_row = j + off[i / d_in.cols]; if(src_row < 0) src_row = 0; if(src_row >= d_in.rows) src_row = d_in.rows-1; y[index] = x[src_col + src_row*d_in.stride]; } } template<typename Real> __global__ static void _take_mean(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index1 = i + j * d_in.stride; int32_cuda index2 = j + i * d_in.stride; if (i <= j && j < d_in.rows) { int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = 0.5 * (x[index1] + x[index2]); } } template<typename Real> __global__ static void _take_lower(const Real* x, Real* y, MatrixDim d_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j > i || i >= d_in.rows) return; int index = i * d_in.stride + j; Real val = x[index]; int index_sp = (i * (i+1) / 2) + j; y[index_sp] = val; } template<typename Real> __global__ static void _take_upper(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j < i || j >= d_in.rows) return; int32_cuda index = i * d_in.stride + j; int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = x[index]; } template<typename Real> __global__ static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1) * (i+2) / 2) - 1; if (i < dim) { y[i] = x[index]; } } template<typename Real> __global__ static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dim.rows && j < dim.cols) { int dst_index = i * dim.stride + j, src_index; if (j <= i) { // no transpose src_index = (i * (i+1) / 2) + j; } else { // transpose. 
src_index = (j * (j+1) / 2) + i; } y[dst_index] = x[src_index]; } } template<typename Real> __global__ static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = copy_from[i]; if(src_col >= 0 && src_col < d_in.cols) { y[index] = x[src_col + j*d_in.stride]; } else { y[index] = 1.0/0.0; } } } template<typename Real> __global__ static void _one(Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { x[i] = 1.0; } } template<typename Real> __global__ static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_row = copy_from[j]; y[index] = x[i + src_row*d_in.stride]; } } template<typename Real> __global__ static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, grad_index = i + j*stride_grad; if (i < d.cols && j < d.rows) { if(wei[index]==0.0) return; //skip L1 if zero weight! Real l1_signed = l1; if(wei[index] < 0.0) //flip sign l1_signed = -l1; Real before = wei[index]; Real after = wei[index] -lr*grad[grad_index] -l1_signed;//simulate update if((after > 0.0) ^ (before > 0.0)) { //sign changed? wei[index] = 0.0; grad[grad_index] = 0.0; } else { wei[index] -= l1_signed; } } } template<typename Real> __global__ static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(blockIdx.x > 0) return; if(blockDim.y != 1) return; __shared__ Real value[CU1DBLOCK]; __shared__ int32_cuda index[CU1DBLOCK]; //copy to shared memory value[threadIdx.x] = mat[i+j*d.stride]; index[threadIdx.x] = threadIdx.x; __syncthreads(); //get the id of the max value int32_cuda out_max = _max_id_reduce(value, index); __syncthreads(); //see if it's bigger value if(threadIdx.x == 0) { if(vec_val[j] <= mat[out_max+j*d.stride]) { vec_val[j] = mat[out_max+j*d.stride]; vec_id[j] = voff+out_max; } } } template<typename Real> __global__ static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(i>0) return; if(j<d.rows) { int32_cuda index = vec_tgt[j] + j*d.stride; Real value = mat_net_out[index]; if(value < 1e-20) value = 1e-20; vec_log_post[j] = log(value); mat_net_out[index] -= 1.0; } } /*********************************************************************** * ANSI-C wrappers of CUDA kernels */ /* * "int32" */ void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } /* * "float" */ /* * CuMatrix */ void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, 
MatrixDim dimA) {hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *vec, const float *mat2, int mat2_row_stride, int mat2_col_stride, float beta) { hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaF_copy_col_from_vec(int Gr, int Bl, float* mat, const float* v, int col, MatrixDim d) { hipLaunchKernelGGL(( _copy_col_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat,v,col,d); } void cudaF_transpose_matrix(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _transpose_matrix), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d); } void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power, bool include_sign, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d); } void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d); } void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d); } void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_set_zero_above_diag(dim3 Gr, dim3 
Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaF_calc_pnorm_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size, float power) { hipLaunchKernelGGL(( _calc_pnorm_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size, power); } void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) { hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d); } void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } else { hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } } void cudaF_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B, const float *C, float *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { hipLaunchKernelGGL(( _add_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d, stride_a, stride_b, stride_c); } void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T, MatrixDim tdim, float *S, MatrixDim sdim) { hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim); } void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d); } void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d); } void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *mat2, int mat2_row_stride, int mat2_col_stride, const float *vec, float beta) { hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, 
alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) { hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask); } /* * CuVector */ void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig, float changed) { hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed); } void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a, float param_1, float param_2, float param_3, int* flag, int dim) { hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim); } void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) { hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim); } void cudaF_vec_min(const float* v, float* value, int dim) { hipLaunchKernelGGL(( _vec_min), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaF_vec_max(const float* v, float* value, int dim) { hipLaunchKernelGGL(( _vec_max), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaF_trace_mat_mat_trans(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { hipLaunchKernelGGL(( _trace_mat_mat_trans<float,4>) , dim3(4),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value); } void cudaF_trace_mat_mat(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { hipLaunchKernelGGL(( _trace_mat_mat<float,2>) , dim3(2),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value); } void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M, int M_cols, int M_row_stride, int M_col_stride, const float *N, int N_row_stride, int N_col_stride, int threads_per_element, float beta) { hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) { hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim); } void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_sum), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc); } void cudaF_pvec_sum(int Gr, int Bl, float* v, float* pvec_sum, int dim, int size) { hipLaunchKernelGGL(( _pvec_sum), dim3(Gr),dim3(Bl), 0, 0, v, pvec_sum, dim, size); } void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, float alpha, MatrixElement<float>* x, int s) { hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, s); } void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s, const float* z, MatrixDim d, 
float* z2, MatrixDim d2, float* t) { hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t); } void cudaD_comp_obj_deriv(dim3 Gr,dim3 Bl, MatrixElement<double>* x, int s, const double* z, MatrixDim d, double* z2, MatrixDim d2, double* t) { hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t); } void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst, const float *src, int dim) { hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim); } void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim); } void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) { hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim); } void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) { hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim); } void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) { hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d); } void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d, const float *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, float alpha, float beta, int B_trans) { if (B_trans) { hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const float *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const float *D_data, int D_row_stride, int D_col_stride, float alpha, float beta) { hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaF_soft_hinge (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size, float power) { hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power); } void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, 
MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in); } void cudaF_one(int Gr, int Bl, float* x, int dim) { hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim); } void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim dim) { hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x, y, dim); } void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d, int stride_grad) { hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad); } void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d); } void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) { hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d); } void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) { hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in); } void cudaF_copy_col_from_mat(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const float* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indices) { hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices); } void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim, const Int32Pair *indices, int indices_size, float *output) { hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output); } void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1, const float *mat2, float 
*mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* * "double" */ /* * CuMatrix */ void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *vec, const double *mat2, int mat2_row_stride, int mat2_col_stride, double beta) { hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaD_copy_col_from_vec(int Gr, int Bl, double* mat, const double* v, int col, MatrixDim d) { hipLaunchKernelGGL(( _copy_col_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat,v,col,d); } void cudaD_transpose_matrix(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _transpose_matrix), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d); } void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power, bool include_sign, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d); } void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d); } void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d); } void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) 
{ hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaD_calc_pnorm_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size, double power) { hipLaunchKernelGGL(( _calc_pnorm_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size, power); } void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) { hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d); } void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } else { hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } } void cudaD_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A, const double *B, const double *C, double *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { hipLaunchKernelGGL(( _add_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d,stride_a,stride_b,stride_c); } void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta, const double* T, MatrixDim tdim, double *S, MatrixDim sdim) { hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim); } void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 
0, 0, alpha,col,beta,dst,d); } void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d); } void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *mat2, int mat2_row_stride, int mat2_col_stride, const double *vec, double beta) { hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) { hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask); } /* * CuVector */ void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig, double changed) { hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed); } void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a, double param_1, double param_2, double param_3, int* flag, int dim) { hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim); } void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) { hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim); } void cudaD_vec_min(const double* v, double* value, int dim) { hipLaunchKernelGGL(( _vec_min), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaD_vec_max(const double* v, double* value, int dim) { hipLaunchKernelGGL(( _vec_max), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaD_trace_mat_mat_trans(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { hipLaunchKernelGGL(( _trace_mat_mat_trans<double,4>) , dim3(4),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value); } void cudaD_trace_mat_mat(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { hipLaunchKernelGGL(( _trace_mat_mat<double,2>) , dim3(2),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value); } void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M, int M_cols, int M_row_stride, int M_col_stride, const double *N, int N_row_stride, int N_col_stride, int threads_per_element, double beta) { hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) { hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim); } void cudaD_copy_col_from_mat(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { 
hipLaunchKernelGGL(( _copy_col_from_mat), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const double* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_sum), dim3(Gr),dim3(Bl), 0, 0, v,value,dim,inc); } void cudaD_pvec_sum(int Gr, int Bl, double* v, double* pvec_sum, int dim, int size) { hipLaunchKernelGGL(( _pvec_sum), dim3(Gr),dim3(Bl), 0, 0, v,pvec_sum,dim,size); } void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, double alpha, MatrixElement<double>* x, int s) { hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, s); } void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst, const double *src, int dim) { hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim); } void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim); } void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) { hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim); } void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) { hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim); } void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) { hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d); } void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d, const double *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, double alpha, double beta, int B_trans) { if (B_trans) { hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const double *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const double *D_data, int D_row_stride, int D_col_stride, double alpha, double beta) { hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaD_soft_hinge (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size, double power) { hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power); } void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, 
src_stride); } void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in); } void cudaD_one(int Gr, int Bl, double* x, int dim) { hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim); } void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_out) { hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x,y,d_out); } void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d,int stride_grad) { hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad); } void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d); } void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) { hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d); } void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) { hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in); } void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indices) { hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices); } void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim, const Int32Pair *indices, int indices_size, double *output) { hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 
0, 0, data, dim, indices, indices_size, output); } void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1, const double *mat2, double *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* Some conversion kernels for which it's more convenient to not name them F or D. */ void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); }
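// ---------------------------------------------------------------------------
// Illustrative usage sketch (hedged; not taken from the original sources).
// It shows how a host caller might drive two of the double-precision wrappers
// above: cudaD_sigmoid (2-D grid, element-wise) and cudaD_vec_sum (a
// single-block reduction).  Assumptions: the prototypes plus MatrixDim
// (rows/cols/stride) and CU1DBLOCK are provided by "cu-kernels-ansi.h"; the
// 16x16 block shape and the helper name example_launches are arbitrary
// choices for this example, not necessarily the blocking scheme the library
// itself uses.
#include <hip/hip_runtime.h>
#include "cu-kernels-ansi.h"

static void example_launches(double *d_y, const double *d_x,
                             double *d_v, double *d_sum,
                             int rows, int cols, int stride, int vec_dim) {
  // Element-wise sigmoid: in these kernels the x grid dimension indexes
  // columns and the y dimension indexes rows, so the grid must cover cols
  // in x and rows in y.
  MatrixDim d;
  d.rows = rows; d.cols = cols; d.stride = stride;
  dim3 Bl(16, 16);
  dim3 Gr((cols + Bl.x - 1) / Bl.x, (rows + Bl.y - 1) / Bl.y);
  cudaD_sigmoid(Gr, Bl, d_y, d_x, d, stride /* src_stride */);

  // Vector sum: _vec_sum is written as a single-block reduction over
  // CU1DBLOCK threads, so it is launched with one block of that size.
  cudaD_vec_sum(1, CU1DBLOCK, d_v, d_sum, vec_dim, 1 /* inc */);

  hipDeviceSynchronize();  // wait for both kernels before using the results
}
// ---------------------------------------------------------------------------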
0b2822c84e252b1caca55fb6fc3de07491f5b0bf.cu
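// Illustrative note on the two launch syntaxes in this file pair: the HIP
// wrappers above use hipLaunchKernelGGL(kernel, gridDim, blockDim,
// sharedMemBytes, stream, args...), while the CUDA wrappers below use the
// native triple-chevron form.  For the same kernel the two are equivalent,
// e.g. (sketch, same arguments in both cases):
//
//   _set_const<<<Gr, Bl>>>(mat, value, d);                                       // CUDA form, as below
//   hipLaunchKernelGGL((_set_const), dim3(Gr), dim3(Bl), 0, 0, mat, value, d);   // HIP form, as above
//
// The extra "0, 0" arguments in the HIP call are the dynamic shared-memory
// size and the stream, both left at their defaults here.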
// cudamatrix/cu-kernels.cu // Copyright 2009-2012 Karel Vesely // 2013 Ehsan Variani // 2013 Johns Hopkins University (author: Daniel Povey) // 2013 Hainan Xu // 2013 Xiaohui Zhang // 2013 Johns Hopkins University (author: Guoguo Chen) // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. // In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers #include <cfloat> #include "cu-kernels-ansi.h" /*********************************************************************** * Generic __device__ functions */ template<typename Real> __device__ static Real _sum_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (sum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x >= halfPoint) { // was < // Get the shared value stored by another thread Real temp = 0.0; if(threadIdx.x < nTotalThreads) { // was +halfPoint temp = buffer[threadIdx.x]; // was +halfPoint } buffer[threadIdx.x - halfPoint] += temp; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } template<typename Real> __device__ static Real _min_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (min) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp < buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two } // the result return buffer[0]; } template<typename Real> __device__ static Real _max_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (max) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } template<typename Real> __device__ static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (get index of maximum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. 
if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread Real temp = -1e20; if(threadIdx.x+halfPoint < nTotalThreads) { temp = val[idx[threadIdx.x + halfPoint]]; } if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return idx[0]; } /*********************************************************************** * CUDA kernels * the functions are templated to have the float/double operations */ /* * CuMatrix */ template<typename Real> __global__ static void _copy_low_upp(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i <= j || i >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } template<typename Real> __global__ static void _copy_upp_low(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j <= i || j >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } // mat += diag(vec) * mat2. template<typename Real> __global__ static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim, const Real *vec, const Real *mat2, int mat2_row_stride, int mat2_col_stride, Real beta) { // Note from Dan: in this kernel, we make the x dimension correspond to the // row index and y to the column index. That was not always the case for // earlier kernels written by others. int i = blockIdx.y * blockDim.y + threadIdx.y; // row index int j = blockIdx.x * blockDim.x + threadIdx.x; // column index int index = i * mat_dim.stride + j, index2 = i * mat2_row_stride + j * mat2_col_stride; if (i < mat_dim.rows && j < mat_dim.cols) { mat[index] = alpha * vec[i] * mat2[index2] + beta * mat[index]; } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (i * (i+1) / 2) + j; int32_cuda index_A = i * dmat.stride + j; if (j <= i) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // transpose the indices used to index the source TpMatrix. if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (j * (j+1) / 2) + i; int32_cuda index_A = i * dmat.stride + j; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } // for this kernel, following the newer pattern, the x-dim is the row-index, the // y-dim is the col-index. template<typename Real, typename OtherReal> __global__ static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index. 
int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = j + i * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } // for this kernel, the x-dim is the row-index at the output, the y-dim is the // col-index at the output template<typename Real, typename OtherReal> __global__ static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index out int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index out int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = i + j * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } template<typename Real> __global__ static void _transpose_matrix(Real* mat, MatrixDim d) { // Transposes a square matrix in-place. int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j >= i || i >= d.rows) { return; } // Only half the threads act. int32_cuda index_a = j + i * d.stride, index_b = i + j * d.stride; Real a = mat[index_a], b = mat[index_b]; mat[index_a] = b; mat[index_b] = a; } template<typename Real> __global__ static void _copy_col_from_vec(Real* mat, const Real* v, int col, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < d.rows ) { int32_cuda index = col + i * d.stride; mat[index] = v[i]; } } template<typename Real> __global__ static void _apply_exp(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if ( i < d.cols && j < d.rows ) { mat[index] = exp(mat[index]); } } template<typename Real> __global__ static void _scale_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value * mat[index]; } } template<typename Real> __global__ static void _set_diag(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = i + i*d.stride; if ( i < d.rows && i < d.cols) { mat[index] = value; } } template<typename Real> __global__ static void _set_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value; } } template<typename Real> __global__ static void _add_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = mat[index] + value; } } template<typename Real> __global__ static void _set_const(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = value; } template<typename Real> __global__ static void _set_zero_above_diag(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < i) mat[index] = 0.0; } template<typename Real> __global__ static void _add(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; 
int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] + value; } template<typename Real> __global__ static void _scale(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] * value; } template<typename Real> __global__ static void _apply_log(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = log(mat[index]); } template<typename Real> __global__ static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] * A[src_index]; } template<typename Real> __global__ static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if ( i < dst_d.cols && j < dst_d.rows ) { Real a = mat[dst_index], b = A[src_index]; mat[dst_index] = (a > b ? a : b); } } template<typename Real> __global__ static void _vec_mul_elements(Real* v, const Real* a, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) v[i] = v[i] * a[i]; } template<typename Real> __global__ static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[i]; } template<typename Real> __global__ static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[j]; } template<typename Real> __global__ static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride; int src_index = i / group_size + j * src_stride; y[dst_index] *= x[src_index]; } } /// y is the derivative we will output; vec is the input we're computing /// the group p-norm on, "norm" is the previously computed group p-norm. template<typename Real> __global__ static void _calc_pnorm_deriv(Real *deriv, const Real *vec, const Real *norm, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. norm_element = norm[src_index]; // this is the pnorm Real vec_element_sign = (vec_element > 0 ? 1 : -1); Real ans; if (norm_element <= 0.0) ans = 0.0; // The derivative is either zero or undefined at the origin. 
else ans = vec_element_sign * pow(std::abs(vec_element), power - 1) * pow(norm_element, 1 - power); deriv[dst_index] = ans; } } /// Set each element to y = (x == orig ? changed : x). template<typename Real> __global__ static void _replace_value(Real *vec, int dim, Real orig, Real changed) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) if (vec[i] == orig) vec[i] = changed; } template<typename Real> __global__ static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (j >= d.rows ) return; //invert divider in shared memory __shared__ Real inv[16]; if(threadIdx.x==0) { inv[threadIdx.y] = 1.0/vec_div[j]; } __syncthreads(); //multiply elements if (i < d.cols && j < d.rows) mat[index] *= inv[threadIdx.y]; } template<typename Real> __global__ static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; int32_cuda index_src = i + j*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j *d.stride; int32_cuda index_src = j + i*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_mat_div_mat(const Real* A, const Real* B, const Real* C, Real* dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, a_index = i + j*stride_a, b_index = i + j*stride_b, c_index = i + j*stride_c; if (i < d.cols && j < d.rows) if (C[c_index] == 0) dst[index] = A[a_index]; else dst[index] = A[a_index] * B[b_index] / C[c_index]; } // Given a matrix input S (not packed!) and a lower-triangular matrix L, // this function does S = beta S + alpha * L^T L. This is used in PSD matrix inversion. // The i index is the row of the destination S and the j the column (although of // course the output is symmetric so it doesn't matter in a sense). The main point // of this is to make use of various symmetries and zero-ness. template<typename Real> __global__ static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim, Real *S, MatrixDim sdim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= sdim.rows || j > i) return; // this thread computes the dot-product of the i'th column of // L with the j'th column of L. The values we're multiplying // are only nonzero for row-index k greater or equal to // max(i, j), which equals i. 
Real sum = 0.0; for (int k = i; k < sdim.rows; k++) { int i_index = i + tdim.stride * k, j_index = j + tdim.stride * k; sum += T[i_index] * T[j_index]; } int output_index1 = i * sdim.stride + j, output_index2 = j * sdim.stride + i; S[output_index1] = alpha * sum + beta * S[output_index1]; S[output_index2] = alpha * sum + beta * S[output_index2]; } template<typename Real> __global__ static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*col[j] + beta*dst[index]; } template<typename Real> __global__ static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*row[i] + beta*dst[index]; } template<typename Real> __global__ static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*dmat.stride; int32_cuda index2 = i + j*dmask.stride; if ( i < dmat.cols && j < dmat.rows ) if(mask[index2] == 0) mat[index] = 0; } template<typename Real> __global__ static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim, const Real *mat2, int mat2_row_stride, int mat2_col_stride, const Real *vec, Real beta) { // Note from Dan: in this kernel, we make the x dimension correspond to the // row index and y to the column index. That was not always the case for // earlier kernels written by others. int i = blockIdx.x * blockDim.x + threadIdx.x; // row index int j = blockIdx.y * blockDim.y + threadIdx.y; // column index int index = i * mat_dim.stride + j, index2 = i * mat2_row_stride + j * mat2_col_stride; if (i < mat_dim.rows && j < mat_dim.cols) { mat[index] = alpha * mat2[index2] * vec[j] + beta * mat[index]; } } template<typename Real> __global__ static void _add_mat_mat_elements(Real *data, const Real *srcA_data, const Real *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda tgt_index = i + j*dim.stride; int32_cuda srcA_index = i + j*srcA_stride; int32_cuda srcB_index = i + j*srcB_stride; if (i < dim.cols && j < dim.rows) { data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index] + beta * data[tgt_index] ; } } /* * CuVector */ // very limited application! template<typename Real> __global__ static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2, Real param_3, int* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { Real ratio = a[i] / param_3; if ( ( ratio < 0.0 ) || ( ratio >= 1.01 )) { *flag = 1; return; } if ( ratio < param_1 ) { Real factor = ((param_1/ratio) > param_2) ? param_2 : (param_1/ratio); v[i] = v[i] / factor; } else if ( ratio > param_1 ) { Real factor = ((ratio/param_1) > param_2) ? 
param_2 : (ratio/param_1); v[i] = v[i] * factor; } } } template<typename Real> __global__ static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v_out[i] = (double) v_in[i]; } } // This kernel writes a copy of the vector "v_in" to each row of the matrix // "m_out". the dimension of v_in should be equal to the #columns of m_out. In // this kernel, following the new pattern, x corresponds to row-index and y to // column-index. template<typename Real> __global__ static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row index. int j = blockIdx.y * blockDim.y + threadIdx.y; // column index. if (i < d.rows && j < d.cols) { int index = i * d.stride + j; m_out[index] = v_in[j]; } } template<typename Real> __global__ static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if ( i < dim) { v_out[i] = (float) v_in[i]; } } template<typename Real> __global__ static void _vec_min(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= CU1DBLOCK) return; __shared__ Real row_data[CU1DBLOCK]; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real min = 1.0 / 0.0; // infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j < min) min = v_j; } row_data[i] = min; __syncthreads(); //get the sum *value = _min_reduce(row_data); } template<typename Real> __global__ static void _vec_max(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.y > 0) return; __shared__ Real row_data[CU1DBLOCK]; if(i >= CU1DBLOCK) return; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real max = -1.0 / 0.0; // -infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j > max) max = v_j; } row_data[i] = max; __syncthreads(); //get the sum *value = _max_reduce(row_data); } // _trace_mat_mat expects to be called with 1 blocks, each of dimension // CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x], // i.e. value[0 through 0]. template<typename Real, int num_blocks> __global__ static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return; int num_elements = dA.rows * dA.cols, num_threads = CU1DBLOCK * num_blocks; int block_size = (num_elements + num_threads - 1) / num_threads; int loop_start = i * block_size, loop_end = (i + 1) * block_size; if (loop_end > num_elements) loop_end = num_elements; Real sum = 0.0; for (int j = loop_start; j < loop_end; j++) { // for (int j = i; j < num_elements; j += num_threads) { int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is // col-index in A; in B, it's reversed. int index_A = col + row * dA.stride, index_B = row + col * B_stride; sum += A[index_A] * B[index_B]; } __shared__ Real row_data[CU1DBLOCK]; row_data[threadIdx.x] = sum; __syncthreads(); Real ans = _sum_reduce(row_data); if (threadIdx.x == 0) value[blockIdx.x] = ans; } // _trace_mat_mat_trans expects to be called with 4 blocks, each of dimension // CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x], // i.e. value[0 through 3]. 
template<typename Real, int num_blocks> __global__ static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return; int num_elements = dA.rows * dA.cols, num_threads = CU1DBLOCK * num_blocks; // int block_size = (num_elements + num_threads - 1) / num_threads; // int loop_start = i * block_size, loop_end = (i + 1) * block_size; // if (loop_end > num_elements) // loop_end = num_elements; Real sum = 0.0; // for (int j = loop_start; j < loop_end; j++) { for (int j = i; j < num_elements; j += num_threads) { int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is // col-index in A; in B, it's reversed. int index_A = col + row * dA.stride, index_B = col + row * B_stride; sum += A[index_A] * B[index_B]; } __shared__ Real row_data[CU1DBLOCK]; row_data[threadIdx.x] = sum; __syncthreads(); Real ans = _sum_reduce(row_data); if (threadIdx.x == 0) value[blockIdx.x] = ans; } // Adds diag(M N) to v, where M and N are matrices. We supply row_stride and // col_stride arguments for M and N, and swapping them allows us to transpose // those matrices. Note: we imagine row-major indexing here, just like Kaldi // and CBLAS (but unlike CUBLAS). // This kernel expects the blockDim to be (CU1DBLOCK, 1) and the // gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element. // threads_per_element should be a power of 2. template<typename Real> __global__ static void _add_diag_mat_mat( Real alpha, Real* v, int v_dim, const Real* M, int M_cols, int M_row_stride, int M_col_stride, const Real *N, int N_row_stride, int N_col_stride, int threads_per_element, Real beta) { // we actually assume blockDim.x == CU1DBLOCK here. // Each diagonal element of v is processed by "threads_per_element" threads. __shared__ Real temp_data[CU1DBLOCK]; int i = blockIdx.x * blockDim.x + threadIdx.x; int v_idx = i / threads_per_element, // v_idx is the index into v that we are supposed to sub_idx = i % threads_per_element; // add to; 0 <= sub_idx < threads_per_element tells // us which block of elements we sum up. if (v_idx >= v_dim) return; Real sum = 0.0; for (int j = sub_idx; j < M_cols; j += threads_per_element) { int M_index = v_idx * M_row_stride + j * M_col_stride, N_index = j * N_row_stride + v_idx * N_col_stride; sum += M[M_index] * N[N_index]; } temp_data[threadIdx.x] = sum; // start_idx = threadIdx.x - sub_idx; // start of the position in temp_data // that we want to sum up. // The following is a tree-based reduction of the elements of temp_data from // start_idx to start_idx + threads_per_element - 1; our own index is "sub_idx". 
__syncthreads(); int num_total_threads = threads_per_element; while (num_total_threads > 1) { int half_point = ((1 + num_total_threads) >> 1); if (sub_idx < half_point) { Real temp = 0.0; if (sub_idx + half_point < num_total_threads) { temp = temp_data[threadIdx.x + half_point]; } temp_data[threadIdx.x] += temp; } __syncthreads(); num_total_threads = half_point; } if (sub_idx == 0) { v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x]; } } template<typename Real> __global__ static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) v[i] = alpha * x[i] * y[i] + beta * v[i]; } template<typename Real> __global__ static void _copy_col_from_mat(Real* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = mat[index]; } template<typename Real> __global__ static void _copy_col_from_mat_df(double* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (double) mat[index]; } template<typename Real> __global__ static void _copy_col_from_mat_fd(float* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (float) mat[index]; } template<typename Real> __global__ static void _vec_apply_exp(Real* v, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v[i] = exp(v[i]); } } template<typename Real> __global__ static void _vec_apply_log(Real* v, Real* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { if (v[i] < 0) { *flag = 1; return; } v[i] = log(v[i]); } } template<typename Real> __global__ static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z, MatrixDim d, Real* z2, MatrixDim d2, Real* t) { int i = threadIdx.x; __shared__ Real tot_objf[CU1DBLOCK]; __shared__ Real tot_weight[CU1DBLOCK]; Real tmp_weight_sum = 0; Real tmp_tot_objf = 0; int size = s / CU1DBLOCK; //the least size in a loop (later part) int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i*size; loop_end = threshold + (i+1)*size; } for(int j = loop_start; j< loop_end; j++) { int m = (x + j)->row; //* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) ); int label = (x + j)->column; //*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int)); Real weight = (x + j)->weight; //*(Real*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) ) + 2 * sizeof(int)); tmp_weight_sum += weight; Real this_prob = *(z + m * d.stride + label); tmp_tot_objf += weight * log(this_prob); *(z2 + m * d2.stride + label ) += weight / this_prob;// there might be problems here.... 
} tot_objf[i] = tmp_tot_objf; tot_weight[i] = tmp_weight_sum; __syncthreads(); *t = _sum_reduce(tot_objf); __syncthreads(); *(t+1) = _sum_reduce(tot_weight); return; } template<typename Real> __global__ static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha, MatrixElement<Real>* x, int s) { int i = threadIdx.x; if (i >= s) return; int size = s / CU1DBLOCK; //the least size in a loop (later part) int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i*size; loop_end = threshold + (i+1)*size; } for(int j = loop_start; j < loop_end; j++) { *(data + x[j].row * dim.stride + x[j].column) += alpha * x[j].weight; } } template<typename Real> __global__ static void _matrix_lookup(const Real *data, MatrixDim dim, const Int32Pair *indices, int indices_size, Real *output) { int ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= indices_size) return; int data_ind = indices[ind].first * dim.stride + indices[ind].second; output[ind] = data[data_ind]; } template<typename Real> __global__ static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; //col int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; //row int32_cuda index_mat1 = i + j*mat1_dim.stride; int32_cuda index_mat2 = i + j*mat2_stride; int32_cuda index_mask = i + j*mask_stride; if (i < mat1_dim.cols && j < mat1_dim.rows) mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 1.0 : 0.0); } template<typename Real> __global__ static void _vec_sum(Real *v, Real *sum, int dim, int inc) { int i = threadIdx.x; __shared__ Real row_data[CU1DBLOCK]; if (i >= CU1DBLOCK) return; Real tmp_sum = 0; int size = dim / CU1DBLOCK; //the least size in a loop (later part) int threshold = dim - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i * size; loop_end = threshold + (i+1) * size; } for(int j = loop_start; j< loop_end; j++) { tmp_sum += v[j * inc]; } row_data[threadIdx.x] = tmp_sum; __syncthreads(); *sum = _sum_reduce(row_data); } template<typename Real> __global__ static void _pvec_sum(Real* v, Real* g, int dim, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int start = size * i; if (start >= dim) return; int end = start + size; if (end > dim) end = dim; __shared__ Real row_data[CU1DBLOCK]; Real sum = 0; for (int j = start; j < end; j++) sum += v[j]; row_data[threadIdx.x] = sum; __syncthreads(); g[blockIdx.x] = _sum_reduce(row_data); } template<typename Real> __global__ static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] < floor_val) { v[i] = floor_val; count[i] = 1; } else { count[i] = 0; } } } // Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index. // this is for no reason, really, I just happened to prefer this // at the time. 
[dan] template<typename Real> __global__ static void _apply_pow(Real* mat, Real power, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { if (power == 1.0) return; if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { if (!(mat[index] >= 0.0)) return; mat[index] = sqrt(mat[index]); } else { mat[index] = pow(mat[index], power); } } } template<typename Real> __global__ static void _apply_pow_abs(Real* mat, Real power, bool include_sign, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { if (include_sign == true && mat[index] < 0) { if (power == 1.0) mat[index] = -std::abs(mat[index]); if (power == 2.0) { mat[index] = -mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = -sqrt(std::abs(mat[index])); } else { mat[index] = -pow(std::abs(mat[index]), power); } } else { if (power == 1.0) mat[index] = std::abs(mat[index]); if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = sqrt(std::abs(mat[index])); } else if (power < 0.0 && mat[index] == 0.0) { mat[index] = 0.0; } else { mat[index] = pow(std::abs(mat[index]), power); } } } } // Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index. // this is for no reason, really, I just happened to prefer this // at the time. [dan] template<typename Real> __global__ static void _apply_heaviside(Real* mat, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { mat[index] = (mat[index] > 0.0 ? 1.0 : 0.0); } } template<typename Real> __global__ static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (mat[index] < floor_val) mat[index] = floor_val; } } template<typename Real> __global__ static void _copy_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { // Note: in this kernel, the x dimension corresponds to rows and the y to columns, // as it will be going forward. int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int index = reorder[j], dst_index = i * dst_dim.stride + j; if (index >= 0) { int src_index = i * src_stride + reorder[j]; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0.0; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { // Note: in this kernel, the x dimension corresponds to rows and the y to columns, // as it will be going forward. 
int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int index = reorder[i], dst_index = i * dst_dim.stride + j; if (index >= 0) { int src_index = reorder[i] * src_stride + j; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows ) { if (mat[index] > ceiling_val) mat[index] = ceiling_val; } } template<typename Real> __global__ static void _invert_elements(Real* data, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j*d.stride; if (i < d.cols && j < d.rows) data[index] = 1.0/data[index]; } // matrix-wise, do data = alpha * data + beta * A * B^T, // where B is a block matrix. template<typename Real> __global__ static void _add_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } template<typename Real> __global__ static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[j]; int B_row_start = block_data.row_offset, B_col_start = block_data.col_offset, B_num_rows = block_data.matrix_dim.rows, B_num_cols = block_data.matrix_dim.cols, B_row_stride = block_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(block_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < B_num_cols; k++) { const Real *this_B_col = B_data + k; const Real *this_A_row = A_data + i * A_row_stride + B_row_start * A_col_stride; // this_A_row points to the element A[i][B_row_start], it's really just // part of this row of A. 
Real sum = 0.0; for (int l = 0; l < B_num_rows; l++) // l indexes rows of B. sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + B_col_start); data[index] = alpha * sum + beta * data[index]; } } // For a block matrix B, does B = alpha * C * D + beta * B. // the (x,y,z) indices are the block index, then the row // and column indices within the block. Note: transposition of C and D // is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride), // so it's invisible to this code. The num-cols and num-rows of C and D // are only provided to the extent that they are not already determined // by other quantities. template<typename Real> __global__ static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks, const Real *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const Real *D_data, int D_row_stride, int D_col_stride, Real alpha, Real beta) { int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B. int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block if (b >= num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[b]; if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols) return; // we're outside the dimensions of the b'th block. // B_elem is the element of B we're writing to. Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data) + i * block_data.matrix_dim.stride + j; Real B_val = *B_elem; // B_row and B_col are the (row, col) index into the full matrix B. int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j; const Real *C_row_data = C_data + C_row_stride * B_row, *D_col_data = D_data + D_col_stride * B_col; Real sum = 0.0; for (int k = 0; k < C_num_cols; k++) { sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride]; } *B_elem = alpha * sum + beta * B_val; } template<typename Real> __global__ static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } // Since this is a newer kernel, x is the row-index and y is the // column-index. 
template<typename Real> __global__ static void _sum_column_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indices) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col, src_start_index = row * src_dim.stride + indices[col].first, src_end_index = row * src_dim.stride + indices[col].second; Real sum = 0.0; for (int index = src_start_index; index < src_end_index; index++) sum += src_data[index]; data[dst_index] = sum; } template<typename Real> __global__ static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; // compute the function y[index] = log(1 + exp(x[index])) if(i < d.cols && j < d.rows) { Real val = x[src_index], result; if (val >= 10.0) result = val; // function approaches y=x as x gets large else result = log1p(exp(val)); y[dst_index] = result; } } template<typename Real> __global__ static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; Real tmp = 0; int src_begin_index = i * group_size + j * src_stride; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { tmp += pow(std::abs(x[src_index]), power); } tmp = pow(tmp, Real(1.0 / power)); if (!isnan(tmp)) { y[dst_index] = tmp; } else { Real max_value = x[src_begin_index], min_value = max_value; for (int src_index = src_begin_index + 1; src_index < src_end_index; src_index ++) { if (x[src_index] > max_value) max_value = x[src_index]; if (x[src_index] < min_value) min_value = x[src_index]; } tmp = 0.0; Real max_abs_value = (max_value > -min_value ? 
max_value : -min_value); // let max_value be the // largest abs(value) if (max_abs_value == 0) { y[dst_index] = 0.0; } else { for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { Real x_scaled = x[src_index] / max_abs_value; tmp += pow(std::abs(x_scaled), Real(power)); } y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value; } } } } /* * cu:: */ template<typename Real> __global__ static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; if(i < d.cols && j < d.rows) { Real res = 1.0 / (1.0 + exp(-x[src_index])); y[dst_index] = res; } } template<typename Real> __global__ static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = y[y_index]*(1.0-y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j * src_stride; if(i < d.cols && j < d.rows) { Real exp_2x = exp(2.0*x[src_index]); Real res; if(isinf(exp_2x)) { res = 1.0; } else { res = (exp_2x - 1.0) / (exp_2x + 1.0); } y[dst_index] = res; } } template<typename Real> __global__ static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = (1.0 - y[y_index]*y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; //copy input to aux aux[threadIdx.x] = x[threadIdx.x+j*d.stride]; for(int i=1; i<steps; ++i) { if(threadIdx.x+i*THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride]) aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride]; } //get the maximum value int nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x+halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real max = aux[0]; __syncthreads(); // subtract max, apply exp, sum up... 
y[threadIdx.x+j*d.stride] = exp(x[threadIdx.x+j*d.stride] - max); aux[threadIdx.x] = y[threadIdx.x+j*d.stride]; for(int i=1; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = exp(x[threadIdx.x+i*THREADS+j*d.stride] - max); aux[threadIdx.x] += y[threadIdx.x+i*THREADS+j*d.stride]; } } nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real sum = aux[0]; __syncthreads(); //normalize by sum... for(int i=0; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = y[threadIdx.x+i*THREADS+j*d.stride] / sum; } } } template<typename Real> __global__ static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = i % d_in.cols; int32_cuda src_row = j + off[i / d_in.cols]; if(src_row < 0) src_row = 0; if(src_row >= d_in.rows) src_row = d_in.rows-1; y[index] = x[src_col + src_row*d_in.stride]; } } template<typename Real> __global__ static void _take_mean(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index1 = i + j * d_in.stride; int32_cuda index2 = j + i * d_in.stride; if (i <= j && j < d_in.rows) { int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = 0.5 * (x[index1] + x[index2]); } } template<typename Real> __global__ static void _take_lower(const Real* x, Real* y, MatrixDim d_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j > i || i >= d_in.rows) return; int index = i * d_in.stride + j; Real val = x[index]; int index_sp = (i * (i+1) / 2) + j; y[index_sp] = val; } template<typename Real> __global__ static void _take_upper(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j < i || j >= d_in.rows) return; int32_cuda index = i * d_in.stride + j; int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = x[index]; } template<typename Real> __global__ static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1) * (i+2) / 2) - 1; if (i < dim) { y[i] = x[index]; } } template<typename Real> __global__ static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dim.rows && j < dim.cols) { int dst_index = i * dim.stride + j, src_index; if (j <= i) { // no transpose src_index = (i * (i+1) / 2) + j; } else { // transpose. 
src_index = (j * (j+1) / 2) + i; } y[dst_index] = x[src_index]; } } template<typename Real> __global__ static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = copy_from[i]; if(src_col >= 0 && src_col < d_in.cols) { y[index] = x[src_col + j*d_in.stride]; } else { y[index] = 1.0/0.0; } } } template<typename Real> __global__ static void _one(Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { x[i] = 1.0; } } template<typename Real> __global__ static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_row = copy_from[j]; y[index] = x[i + src_row*d_in.stride]; } } template<typename Real> __global__ static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, grad_index = i + j*stride_grad; if (i < d.cols && j < d.rows) { if(wei[index]==0.0) return; //skip L1 if zero weight! Real l1_signed = l1; if(wei[index] < 0.0) //flip sign l1_signed = -l1; Real before = wei[index]; Real after = wei[index] -lr*grad[grad_index] -l1_signed;//simulate update if((after > 0.0) ^ (before > 0.0)) { //sign changed? wei[index] = 0.0; grad[grad_index] = 0.0; } else { wei[index] -= l1_signed; } } } template<typename Real> __global__ static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(blockIdx.x > 0) return; if(blockDim.y != 1) return; __shared__ Real value[CU1DBLOCK]; __shared__ int32_cuda index[CU1DBLOCK]; //copy to shared memory value[threadIdx.x] = mat[i+j*d.stride]; index[threadIdx.x] = threadIdx.x; __syncthreads(); //get the id of the max value int32_cuda out_max = _max_id_reduce(value, index); __syncthreads(); //see if it's bigger value if(threadIdx.x == 0) { if(vec_val[j] <= mat[out_max+j*d.stride]) { vec_val[j] = mat[out_max+j*d.stride]; vec_id[j] = voff+out_max; } } } template<typename Real> __global__ static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(i>0) return; if(j<d.rows) { int32_cuda index = vec_tgt[j] + j*d.stride; Real value = mat_net_out[index]; if(value < 1e-20) value = 1e-20; vec_log_post[j] = log(value); mat_net_out[index] -= 1.0; } } /*********************************************************************** * ANSI-C wrappers of CUDA kernels */ /* * "int32" */ void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } /* * "float" */ /* * CuMatrix */ void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { _copy_upp_low<<<Gr,Bl>>>(A,dimA); } void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { _copy_low_upp<<<Gr,Bl>>>(A,dimA); } void 
cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *vec, const float *mat2, int mat2_row_stride, int mat2_col_stride, float beta) { _add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaF_copy_col_from_vec(int Gr, int Bl, float* mat, const float* v, int col, MatrixDim d) { _copy_col_from_vec<<<Gr,Bl>>>(mat,v,col,d); } void cudaF_transpose_matrix(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _transpose_matrix<<<Gr,Bl>>>(mat, d); } void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_exp<<<Gr,Bl>>>(mat,d); } void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) { _apply_pow<<<Gr,Bl>>>(mat, power, d); } void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power, bool include_sign, MatrixDim d) { _apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d); } void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_heaviside<<<Gr,Bl>>>(mat, d); } void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) { _apply_floor<<<Gr,Bl>>>(mat, floor_val, d); } void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) { _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d); } void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) { _set_diag<<<Gr,Bl>>>(mat,value,d); } void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _set_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _add_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _set_zero_above_diag<<<Gr,Bl>>>(mat, d); } void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _scale_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _scale<<<Gr,Bl>>>(mat,value,d); } void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_log<<<Gr,Bl>>>(mat,d); } void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { 
_max<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { _mul_cols_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { _mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaF_calc_pnorm_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size, float power) { _calc_pnorm_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size, power); } void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) { _div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d); } void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } else { _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } } void cudaF_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B, const float *C, float *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { _add_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d, stride_a, stride_b, stride_c); } void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T, MatrixDim tdim, float *S, MatrixDim sdim) { _sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim); } void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) { _add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d); } void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) { _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d); } void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *mat2, int mat2_row_stride, int mat2_col_stride, const float *vec, float beta) { _add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) { _add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... 
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { _apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask); } /* * CuVector */ void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig, float changed) { _replace_value<<<Gr,Bl>>>(v, dim, orig, changed); } void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a, float param_1, float param_2, float param_3, int* flag, int dim) { _set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim); } void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) { _copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) { _copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) { _vec_mul_elements<<<Gr,Bl>>>(v, a, dim); } void cudaF_vec_min(const float* v, float* value, int dim) { _vec_min<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaF_vec_max(const float* v, float* value, int dim) { _vec_max<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaF_trace_mat_mat_trans(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { _trace_mat_mat_trans<float,4> <<<4,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaF_trace_mat_mat(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { _trace_mat_mat<float,2> <<<2,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M, int M_cols, int M_row_stride, int M_col_stride, const float *N, int N_row_stride, int N_col_stride, int threads_per_element, float beta) { _add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) { _add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim); } void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) { _vec_sum<<<Gr,Bl>>>(v, value, dim, inc); } void cudaF_pvec_sum(int Gr, int Bl, float* v, float* pvec_sum, int dim, int size) { _pvec_sum<<<Gr,Bl>>>(v, pvec_sum, dim, size); } void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, float alpha, MatrixElement<float>* x, int s) { _cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, s); } void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s, const float* z, MatrixDim d, float* z2, MatrixDim d2, float* t) { _cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t); } void cudaD_comp_obj_deriv(dim3 Gr,dim3 Bl, MatrixElement<double>* x, int s, const double* z, MatrixDim d, double* z2, MatrixDim d2, double* t) { _cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t); } void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst, const float *src, int dim) { _vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim); } void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) { _vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim); } void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) { _vec_apply_exp<<<Gr,Bl>>>(v,dim); } void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) { _vec_apply_log<<<Gr,Bl>>>(v,flag,dim); } void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) { _invert_elements<<<Gr,Bl>>>(data, d); } void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d, const 
float *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, float alpha, float beta, int B_trans) { if (B_trans) { _add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { _add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const float *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const float *D_data, int D_row_stride, int D_col_stride, float alpha, float beta) { _block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaF_soft_hinge (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size, float power) { _group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power); } void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _tanh<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { _diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { _softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { _splice<<<Gr,Bl>>>(y,x,off,d_out,d_in); } void cudaF_one(int Gr, int Bl, float* x, int dim) { _one<<<Gr,Bl>>>(x,dim); } void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_mean<<<Gr,Bl>>>(x,y,d_in); } void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_lower<<<Gr,Bl>>>(x,y,d_in); } void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_upper<<<Gr,Bl>>>(x,y,d_in); } void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim dim) { _copy_from_sp<<<Gr,Bl>>>(x, y, dim); } void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d, int stride_grad) { _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad); } void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d); } void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* 
vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) { _diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d); } void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) { _copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in); } void cudaF_copy_col_from_mat(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) { _copy_col_from_mat<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const float* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indices) { _sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices); } void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim, const Int32Pair *indices, int indices_size, float *output) { _matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output); } void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1, const float *mat2, float *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { _equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* * "double" */ /* * CuMatrix */ void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { _copy_upp_low<<<Gr,Bl>>>(A,dimA); } void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { _copy_low_upp<<<Gr,Bl>>>(A,dimA); } void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *vec, const double *mat2, int mat2_row_stride, int mat2_col_stride, double beta) { _add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaD_copy_col_from_vec(int Gr, int Bl, double* mat, const double* v, int col, MatrixDim d) { _copy_col_from_vec<<<Gr,Bl>>>(mat,v,col,d); } void cudaD_transpose_matrix(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _transpose_matrix<<<Gr,Bl>>>(mat, d); } void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_exp<<<Gr,Bl>>>(mat,d); } void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) { _apply_pow<<<Gr,Bl>>>(mat, power, d); } void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power, bool include_sign, MatrixDim d) { _apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d); } void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_heaviside<<<Gr,Bl>>>(mat, d); } void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, 
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) { _apply_floor<<<Gr,Bl>>>(mat, floor_val, d); } void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) { _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d); } void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) { _set_diag<<<Gr,Bl>>>(mat,value,d); } void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _set_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _add_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _set_zero_above_diag<<<Gr,Bl>>>(mat, d); } void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } void cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _scale_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _scale<<<Gr,Bl>>>(mat,value,d); } void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_log<<<Gr,Bl>>>(mat,d); } void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _max<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { _mul_cols_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { _mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaD_calc_pnorm_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size, double power) { _calc_pnorm_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size, power); } void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) { _div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d); } void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } else { _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } } void cudaD_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A, const double *B, const double *C, double *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { _add_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d,stride_a,stride_b,stride_c); } void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta, const double* T, MatrixDim tdim, double *S, MatrixDim sdim) { _sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim); } void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) { _add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d); } void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, 
MatrixDim d) { _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d); } void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *mat2, int mat2_row_stride, int mat2_col_stride, const double *vec, double beta) { _add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) { _add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { _apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask); } /* * CuVector */ void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig, double changed) { _replace_value<<<Gr,Bl>>>(v, dim, orig, changed); } void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a, double param_1, double param_2, double param_3, int* flag, int dim) { _set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim); } void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) { _copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) { _copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) { _vec_mul_elements<<<Gr,Bl>>>(v, a, dim); } void cudaD_vec_min(const double* v, double* value, int dim) { _vec_min<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaD_vec_max(const double* v, double* value, int dim) { _vec_max<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaD_trace_mat_mat_trans(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { _trace_mat_mat_trans<double,4> <<<4,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaD_trace_mat_mat(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { _trace_mat_mat<double,2> <<<2,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M, int M_cols, int M_row_stride, int M_col_stride, const double *N, int N_row_stride, int N_col_stride, int threads_per_element, double beta) { _add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) { _add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim); } void cudaD_copy_col_from_mat(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { _copy_col_from_mat<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const double* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) { _vec_sum<<<Gr,Bl>>>(v,value,dim,inc); } void cudaD_pvec_sum(int Gr, int Bl, double* v, double* pvec_sum, int dim, int size) { _pvec_sum<<<Gr,Bl>>>(v,pvec_sum,dim,size); } void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, 
MatrixDim dim, double alpha, MatrixElement<double>* x, int s) { _cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, s); } void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst, const double *src, int dim) { _vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim); } void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) { _vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim); } void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) { _vec_apply_exp<<<Gr,Bl>>>(v,dim); } void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) { _vec_apply_log<<<Gr,Bl>>>(v,flag,dim); } void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) { _invert_elements<<<Gr,Bl>>>(data, d); } void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d, const double *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, double alpha, double beta, int B_trans) { if (B_trans) { _add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { _add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const double *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const double *D_data, int D_row_stride, int D_col_stride, double alpha, double beta) { _block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaD_soft_hinge (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size, double power) { _group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power); } void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _tanh<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { _diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { _softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { _splice<<<Gr,Bl>>>(y,x,off,d_out,d_in); } void cudaD_one(int Gr, int Bl, double* x, int dim) { _one<<<Gr,Bl>>>(x,dim); } void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_mean<<<Gr,Bl>>>(x,y,d_in); } void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_lower<<<Gr,Bl>>>(x,y,d_in); } void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_upper<<<Gr,Bl>>>(x,y,d_in); } void cudaD_copy_from_sp(dim3 Gr, 
dim3 Bl, const double* x, double* y, MatrixDim d_out) { _copy_from_sp<<<Gr,Bl>>>(x,y,d_out); } void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d,int stride_grad) { _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad); } void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d); } void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) { _diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d); } void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) { _copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in); } void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indices) { _sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices); } void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim, const Int32Pair *indices, int indices_size, double *output) { _matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output); } void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1, const double *mat2, double *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { _equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* Some conversion kernels for which it's more convenient to not name them F or D. */ void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); }
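/*
 * A minimal, self-contained sketch (not from the original file) of the shared-memory
 * tree reduction that _softmax_reduce and _find_row_max_id above rely on: one block
 * per row, the number of active threads halved at each step, here reducing to the
 * row maximum. All names (row_max_kernel, kRowMaxBlock) are illustrative only.
 */
#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>

#define kRowMaxBlock 256

__global__ static void row_max_kernel(const float* x, float* row_max,
                                      int rows, int cols, int stride) {
  int j = blockIdx.x;                       // one block per row
  if (j >= rows) return;
  __shared__ float aux[kRowMaxBlock];
  // each thread folds its strided slice of the row into a private maximum
  float m = -INFINITY;
  for (int i = threadIdx.x; i < cols; i += blockDim.x)
    m = fmaxf(m, x[i + j * stride]);
  aux[threadIdx.x] = m;
  __syncthreads();
  // tree reduction: halve the number of active threads each step
  int nTotalThreads = blockDim.x;
  while (nTotalThreads > 1) {
    int halfPoint = (nTotalThreads + 1) >> 1;
    if (threadIdx.x < halfPoint && threadIdx.x + halfPoint < nTotalThreads)
      aux[threadIdx.x] = fmaxf(aux[threadIdx.x], aux[threadIdx.x + halfPoint]);
    __syncthreads();
    nTotalThreads = halfPoint;
  }
  if (threadIdx.x == 0) row_max[j] = aux[0];
}

int main() {
  const int rows = 4, cols = 1000, stride = cols;
  static float h[rows * cols];
  for (int j = 0; j < rows; j++)
    for (int i = 0; i < cols; i++)
      h[i + j * stride] = (float)((i * 7 + j * 13) % 101);  // every row maximum is 100
  float *d_x, *d_m;
  cudaMalloc(&d_x, sizeof(h));
  cudaMalloc(&d_m, sizeof(float) * rows);
  cudaMemcpy(d_x, h, sizeof(h), cudaMemcpyHostToDevice);
  row_max_kernel<<<rows, kRowMaxBlock>>>(d_x, d_m, rows, cols, stride);
  float out[rows];
  cudaMemcpy(out, d_m, sizeof(float) * rows, cudaMemcpyDeviceToHost);
  for (int j = 0; j < rows; j++) printf("row %d max = %f\n", j, out[j]);
  cudaFree(d_x); cudaFree(d_m);
  return 0;
}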
a116ded09d97477389f0bb639fe3367afa5782a6.hip
// !!! This is a file automatically generated by hipify!!! #include "THHTensorMath.h" #include "THHGeneral.h" #include "THHBlas.h" #include "THHTensorCopy.h" #include "THHTensorRandom.h" #include "THHApply.cuh" #include "THHReduce.cuh" #include <thrust/device_ptr.h> #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/reduce.h> #include <thrust/inner_product.h> #ifndef DIVUP #define DIVUP(x, y) (((x) + (y) - 1) / (y)) #endif #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC) \ struct Tensor##NAME##Op { \ __device__ __forceinline__ void operator()(float* out, float* in) const { \ *out = CFUNC(*in); \ } \ \ __device__ __forceinline__ void operator()(float* v) const { \ *v = CFUNC(*v); \ } \ }; \ \ void THCudaTensor_##NAME(THCState* state, THCudaTensor* self_, THCudaTensor* src) { \ THAssert(THCudaTensor_checkGPU(state, 2, self_, src)); \ if (self_ == src) { \ if (!THCudaTensor_pointwiseApply1(state, self_, Tensor##NAME##Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } else { \ THCudaTensor_resizeAs(state, self_, src); \ \ if (!THCudaTensor_pointwiseApply2(state, self_, src, Tensor##NAME##Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } \ \ THCudaCheck(hipGetLastError()); \ } IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log, log) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, log1p) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(exp, exp) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(cos, cos) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(acos, acos) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(cosh, cosh) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sin, sin) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(asin, asin) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sinh, sinh) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(tan, tan) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(atan, atan) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(tanh, tanh) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sqrt, sqrt) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(ceil, ceil) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, floor) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(abs, fabs) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(round, roundf) #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC struct TensorAddOp { __device__ __forceinline__ void operator()(float* out, float* in) { *out += *in; } __device__ __forceinline__ void operator()(float* out, float* in1, float* in2) { *out = *in1 + *in2; } }; struct TensorCAddOp { TensorCAddOp(float v) : val(v) {} __device__ __forceinline__ void operator()(float* out, float* in) { *out += val * *in; } __device__ __forceinline__ void operator()(float* out, float* in1, float* in2) { *out = *in1 + val * *in2; } float val; }; void THCudaTensor_cadd(THCState *state, THCudaTensor *self_, THCudaTensor* src1, float value, THCudaTensor *src2) { THAssert(THCudaTensor_checkGPU(state, 3, self_, src1, src2)); THArgCheck(THCudaTensor_nElement(state, src1) == THCudaTensor_nElement(state, src2), 3, "sizes do not match"); if (self_ == src1) { if (value == 1.0f) { // self += src2 if (!THCudaTensor_pointwiseApply2(state, self_, src2, TensorAddOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self += value * src2 if (!THCudaTensor_pointwiseApply2(state, self_, src2, TensorCAddOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } else { THCudaTensor_resizeAs(state, self_, src1); if (value == 1.0f) { // self = src1 + src2 if (!THCudaTensor_pointwiseApply3(state, self_, src1, src2, TensorAddOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self = src1 + value * src2 if (!THCudaTensor_pointwiseApply3(state, self_, src1, src2, TensorCAddOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } 
THCudaCheck(hipGetLastError()); } struct TensorMulOp { __device__ __forceinline__ void operator()(float* out, float* in) { *out *= *in; } __device__ __forceinline__ void operator()(float* out, float* in1, float* in2) { *out = *in1 * *in2; } }; void THCudaTensor_cmul(THCState *state, THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THAssert(THCudaTensor_checkGPU(state, 3, self_, src1, src2)); THArgCheck(THCudaTensor_nElement(state, src1) == THCudaTensor_nElement(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self *= src2 if (!THCudaTensor_pointwiseApply2(state, self_, src2, TensorMulOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCudaTensor_resizeAs(state, self_, src1); // self = src1 * src2 if (!THCudaTensor_pointwiseApply3(state, self_, src1, src2, TensorMulOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); }
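/*
 * TensorCAddOp above only supplies the per-element rule; traversal of possibly
 * non-contiguous tensors is handled by THCudaTensor_pointwiseApply2/3 from THHApply.cuh.
 * The following is a rough, contiguous-only illustration (not from the original file)
 * of that same rule, out = in1 + val * in2; caxpy_kernel and the sizes are illustrative.
 */
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ static void caxpy_kernel(float* out, const float* in1, const float* in2,
                                    float val, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = in1[i] + val * in2[i];  // same per-element rule as TensorCAddOp
}

int main() {
  const int n = 1000;
  static float h1[n], h2[n], hout[n];
  for (int i = 0; i < n; i++) { h1[i] = 1.0f; h2[i] = 3.0f; }
  float *d1, *d2, *dout;
  hipMalloc(&d1, sizeof(h1));
  hipMalloc(&d2, sizeof(h2));
  hipMalloc(&dout, sizeof(hout));
  hipMemcpy(d1, h1, sizeof(h1), hipMemcpyHostToDevice);
  hipMemcpy(d2, h2, sizeof(h2), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(caxpy_kernel, dim3((n + 255) / 256), dim3(256), 0, 0,
                     dout, d1, d2, 0.5f, n);            // out = in1 + 0.5 * in2
  hipMemcpy(hout, dout, sizeof(hout), hipMemcpyDeviceToHost);
  printf("out[0] = %f (expected 2.5)\n", hout[0]);
  hipFree(d1); hipFree(d2); hipFree(dout);
  return 0;
}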
a116ded09d97477389f0bb639fe3367afa5782a6.cu
#include "THCTensorMath.h" #include "THCGeneral.h" #include "THCBlas.h" #include "THCTensorCopy.h" #include "THCTensorRandom.h" #include "THCApply.cuh" #include "THCReduce.cuh" #include <thrust/device_ptr.h> #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/reduce.h> #include <thrust/inner_product.h> #ifndef DIVUP #define DIVUP(x, y) (((x) + (y) - 1) / (y)) #endif #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC) \ struct Tensor##NAME##Op { \ __device__ __forceinline__ void operator()(float* out, float* in) const { \ *out = CFUNC(*in); \ } \ \ __device__ __forceinline__ void operator()(float* v) const { \ *v = CFUNC(*v); \ } \ }; \ \ void THCudaTensor_##NAME(THCState* state, THCudaTensor* self_, THCudaTensor* src) { \ THAssert(THCudaTensor_checkGPU(state, 2, self_, src)); \ if (self_ == src) { \ if (!THCudaTensor_pointwiseApply1(state, self_, Tensor##NAME##Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } else { \ THCudaTensor_resizeAs(state, self_, src); \ \ if (!THCudaTensor_pointwiseApply2(state, self_, src, Tensor##NAME##Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } \ \ THCudaCheck(cudaGetLastError()); \ } IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log, log) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, log1p) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(exp, exp) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(cos, cos) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(acos, acos) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(cosh, cosh) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sin, sin) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(asin, asin) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sinh, sinh) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(tan, tan) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(atan, atan) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(tanh, tanh) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sqrt, sqrt) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(ceil, ceil) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, floor) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(abs, fabs) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(round, roundf) #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC struct TensorAddOp { __device__ __forceinline__ void operator()(float* out, float* in) { *out += *in; } __device__ __forceinline__ void operator()(float* out, float* in1, float* in2) { *out = *in1 + *in2; } }; struct TensorCAddOp { TensorCAddOp(float v) : val(v) {} __device__ __forceinline__ void operator()(float* out, float* in) { *out += val * *in; } __device__ __forceinline__ void operator()(float* out, float* in1, float* in2) { *out = *in1 + val * *in2; } float val; }; void THCudaTensor_cadd(THCState *state, THCudaTensor *self_, THCudaTensor* src1, float value, THCudaTensor *src2) { THAssert(THCudaTensor_checkGPU(state, 3, self_, src1, src2)); THArgCheck(THCudaTensor_nElement(state, src1) == THCudaTensor_nElement(state, src2), 3, "sizes do not match"); if (self_ == src1) { if (value == 1.0f) { // self += src2 if (!THCudaTensor_pointwiseApply2(state, self_, src2, TensorAddOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self += value * src2 if (!THCudaTensor_pointwiseApply2(state, self_, src2, TensorCAddOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } else { THCudaTensor_resizeAs(state, self_, src1); if (value == 1.0f) { // self = src1 + src2 if (!THCudaTensor_pointwiseApply3(state, self_, src1, src2, TensorAddOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self = src1 + value * src2 if (!THCudaTensor_pointwiseApply3(state, self_, src1, src2, TensorCAddOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THCudaCheck(cudaGetLastError()); } struct TensorMulOp { __device__ 
__forceinline__ void operator()(float* out, float* in) { *out *= *in; } __device__ __forceinline__ void operator()(float* out, float* in1, float* in2) { *out = *in1 * *in2; } }; void THCudaTensor_cmul(THCState *state, THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2) { THAssert(THCudaTensor_checkGPU(state, 3, self_, src1, src2)); THArgCheck(THCudaTensor_nElement(state, src1) == THCudaTensor_nElement(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self *= src2 if (!THCudaTensor_pointwiseApply2(state, self_, src2, TensorMulOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCudaTensor_resizeAs(state, self_, src1); // self = src1 * src2 if (!THCudaTensor_pointwiseApply3(state, self_, src1, src2, TensorMulOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); }
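/*
 * For readability, this is what the first instantiation above,
 * IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log, log), expands to -- written out by hand with
 * whitespace and comments added; it is not an extra definition in the original file.
 */
struct TensorlogOp {
  __device__ __forceinline__ void operator()(float* out, float* in) const {
    *out = log(*in);
  }
  __device__ __forceinline__ void operator()(float* v) const {
    *v = log(*v);
  }
};

void THCudaTensor_log(THCState* state, THCudaTensor* self_, THCudaTensor* src) {
  THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
  if (self_ == src) {
    // in-place: apply log to every element of self_
    if (!THCudaTensor_pointwiseApply1(state, self_, TensorlogOp())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    // out-of-place: resize self_ to match src, then self_ = log(src) element-wise
    THCudaTensor_resizeAs(state, self_, src);
    if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorlogOp())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
  THCudaCheck(cudaGetLastError());
}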
4424dbf20ee55f355f937143b2429368edbf84f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef __GPUARRAYMULTIPLICATION #define __GPUARRAYMULTIPLICATION #define BLOCKDIMGEMM 256 template <int K, typename T> __global__ void gpuTemplateGEMM(T *C, T *A, T *B, int I, int J) { int tx = threadIdx.x; // block thread index int idx = tx + blockIdx.x * blockDim.x; // global thread index // load data from global memory to shared memory __shared__ T Ashared[BLOCKDIMGEMM]; if (tx<I*K) Ashared[tx] = A[tx]; while (idx<J) { // load data from global memory to shared memory __shared__ T Bshared[K*BLOCKDIMGEMM]; int i, k; int n = K*tx; int p = K*idx; for (k=0; k<K; k++) Bshared[k+n] = B[k+p]; // thread synchronization __syncthreads(); p = I*idx; T Csub; for (i=0; i<I; i++) { Csub = 0.0; for (k=0; k<K; k++) Csub += Ashared[i+I*k]*Bshared[k+n]; C[i+p] = Csub; } idx += blockDim.x * gridDim.x; } } // C[I*J] = A[I*K] x B[K*J] template <int K, typename T> void gpuGEMM(T *C, T *A, T *B, int I, int J) { int gridDim = (J + BLOCKDIMGEMM - 1) / BLOCKDIMGEMM; gridDim = (gridDim>1024)? 1024 : gridDim; hipLaunchKernelGGL(( gpuTemplateGEMM<K>), dim3(gridDim), dim3(BLOCKDIMGEMM), 0, 0, C, A, B, I, J); } template <typename T> __global__ void gpuTemplateSmallSizeGEMM(T *C, T *A, T *B, int I, int J, int K) { int tx = threadIdx.x; // block thread index int idx = tx + blockIdx.x * blockDim.x; // global thread index int i, k, n, p; // load data from global memory to shared memory __shared__ T Ashared[128]; if (tx<I*K) Ashared[tx] = A[tx]; while (idx<J) { // load data from global memory to shared memory __shared__ T Bshared[1024]; n = K*tx; p = K*idx; for (k=0; k<K; k++) Bshared[k+n] = B[k+p]; // thread synchronization __syncthreads(); p = I*idx; T Csub; for (i=0; i<I; i++) { Csub = 0.0; for (k=0; k<K; k++) Csub += Ashared[i+I*k]*Bshared[k+n]; C[i+p] = Csub; } idx += blockDim.x * gridDim.x; } } template<typename T> __global__ void gpuTemplateBigSizeGEMM(T *C, T *A, T *B, int I, int J, int K) { int tx = threadIdx.x; // block thread index int idx = tx + blockIdx.x * blockDim.x; // global thread index int i, k; i = tx; __shared__ T Ashared[1600]; while (i<I*K) { Ashared[i] = A[i]; i = i + blockDim.x; } while (idx<J) { __shared__ T Bshared[2048]; int n = K*tx; int p = K*idx; for (k=0; k<K; k++) Bshared[k+n] = B[k+p]; // thread synchronization __syncthreads(); p = I*idx; T Csub; for (i=0; i<I; i++) { Csub = 0.0; for (k=0; k<K; k++) Csub += Ashared[i+I*k]*Bshared[k+n]; C[i+p] = Csub; } idx += blockDim.x * gridDim.x; } } // C[I*J] = A[I*K] x B[K*J] template <typename T> void gpuGEMM(T *C, T *A, T *B, int I, int J, int K) { if (K<9) { int BLOCKDIM = 256; if (K>4) BLOCKDIM = 128; int gridDim = (J + BLOCKDIM - 1) / BLOCKDIM; gridDim = (gridDim>1024)? 1024 : gridDim; hipLaunchKernelGGL(( gpuTemplateSmallSizeGEMM), dim3(gridDim), dim3(BLOCKDIM), 0, 0, C, A, B, I, J, K); } else { int BLOCKDIM = 64; if (K>24) BLOCKDIM = 32; int gridDim = (J + BLOCKDIM - 1) / BLOCKDIM; gridDim = (gridDim>1024)? 
1024 : gridDim; hipLaunchKernelGGL(( gpuTemplateBigSizeGEMM), dim3(gridDim), dim3(BLOCKDIM), 0, 0, C, A, B, I, J, K); } } template <int N1, int N2, typename T> void gpuTensorGEMM2D(T *C, T *A1, T *A2, T *B, T *Ctmp, int *index, int M1, int M2, int K) { /* B = (A1 x A2) B = A2*B*A1' = (A1*(A2*B)')' INPUT: A1[M1 by N1], A2[M2 by N2], B[N1 by N2 by K] OUTPUT: B[M1 by M2 by K] */ int BLOCKDIM; // Ctmp := A2*B -> Ctmp[M2xN1xK] := A2[M2xN2]*B[N2xN1xK] gpuGEMM<N2>(Ctmp, A2, B, M2, N1*K); // C = permute(Ctmp, [2 1 3]) -> C[N1xM2xK] := Ctmp[M2xN1xK] if (M2*N1<17) BLOCKDIM = M2*N1*32; else BLOCKDIM = M2*N1*16; if (M2*N1==64) BLOCKDIM = 256; gpuIndexPermute12(index, M2, N1, BLOCKDIM/(M2*N1)); gpuPermuteSharedMem(C, Ctmp, index, M2*N1*K, BLOCKDIM); // Ctmp = A2*C -> Ctmp[M2xM1xK] = A1[M1xN1]*C[N1xM2xK] gpuGEMM<N1>(Ctmp, A1, C, M1, M2*K); // C = permute(Ctmp, [2 1 3]) -> C[M2xM1xK] := Ctmp[M1xM2xK] if (M2*M1<17) BLOCKDIM = M2*M1*32; else BLOCKDIM = M2*M1*16; if (M2*M1==64) BLOCKDIM = 256; gpuIndexPermute12(index, M2, M1, BLOCKDIM/(M2*M1)); gpuPermuteSharedMem(C, Ctmp, index, M1*M2*K, BLOCKDIM); } template <typename T> void gpuTensorGEMM2D(T *C, T *A1, T *A2, T *B, T *Ctmp, int *index, int M1, int N1, int M2, int N2, int K) { /* B = (A1 x A2) B = A2*B*A1' = (A1*(A2*B)')' INPUT: A1[M1 by N1], A2[M2 by N2], B[N1 by N2 by K] OUTPUT: B[M1 by M2 by K] */ int BLOCKDIM; // Ctmp := A2*B -> Ctmp[M2xN1xK] := A2[M2xN2]*B[N2xN1xK] gpuGEMM(Ctmp, A2, B, M2, N1*K, N2); //gpuGEMM<4>(Ctmp, A2, B, M2, N1*K); // C = permute(Ctmp, [2 1 3]) -> C[N1xM2xK] := Ctmp[M2xN1xK] if (M2*N1<17) BLOCKDIM = M2*N1*32; else BLOCKDIM = M2*N1*16; if (M2*N1==64) BLOCKDIM = 256; gpuIndexPermute12(index, M2, N1, BLOCKDIM/(M2*N1)); gpuPermuteSharedMem(C, Ctmp, index, M2*N1*K, BLOCKDIM); // Ctmp = A2*C -> Ctmp[M2xM1xK] = A1[M1xN1]*C[N1xM2xK] gpuGEMM(Ctmp, A1, C, M1, M2*K, N1); //gpuGEMM<4>(Ctmp, A1, C, M1, M2*K); // C = permute(Ctmp, [2 1 3]) -> C[M2xM1xK] := Ctmp[M1xM2xK] if (M2*M1<17) BLOCKDIM = M2*M1*32; else BLOCKDIM = M2*M1*16; if (M2*M1==64) BLOCKDIM = 256; gpuIndexPermute12(index, M2, M1, BLOCKDIM/(M2*M1)); gpuPermuteSharedMem(C, Ctmp, index, M1*M2*K, BLOCKDIM); } template <typename T> void gpuTensorGEMM3D(T *C, T *A1, T *A2, T *A3, T *B, T* Ctmp, int *index, int M1, int N1, int M2, int N2, int M3, int N3, int K) { /* B = (A1 x A2 x A3) B INPUT: A1[M1 by N1], A2[M2 by N2], A3[M3 by N3], B[N1 by N2 by N3 by K] OUTPUT: B[M1 by M2 by M3 by K] */ int BLOCKDIM; // Ctmp := A3*B -> Ctmp[M3xN2xN1xK] := A3[M3xN3]*B[N3xN2xN1xK] gpuGEMM(Ctmp, A3, B, M3, N2*N1*K, N3); //gpuGEMM<5>(Ctmp, A3, B, M3, N2*N1*K); // C = permute(Ctmp, [2 1 3]) -> Ctmp[N2xM3xN1*K] := C[M3xN2xN1xK] if (M3*N2<17) BLOCKDIM = M3*N2*32; else BLOCKDIM = M3*N2*16; if (M3*N2==64) BLOCKDIM = 256; gpuIndexPermute12(index, M3, N2, BLOCKDIM/(M3*N2)); gpuPermuteSharedMem(C, Ctmp, index, M3*N2*N1*K, BLOCKDIM); // Ctmp = A2*C -> Ctmp[M2xM3xN1xK] = A2[M2xN2]*C[N2xM3xN1xK] gpuGEMM(Ctmp, A2, C, M2, M3*N1*K, N2); //gpuGEMM<5>(Ctmp, A2, C, M2, N3*N1*K); // C = permute(Ctmp, [2 1 3]) -> C[N1xM2xM3xK] := Ctmp[M2xM3xN1xK] //gpuPermute12(C, Ctmp, M2*M3, N1, K); int N = M2*M3*N1; if (N<33) BLOCKDIM = N*32; else if (N<65) BLOCKDIM = N*16; else if (N<129) BLOCKDIM = N*8; else if (N<257) BLOCKDIM = N*4; else if (N<513) BLOCKDIM = N*2; else BLOCKDIM = N; if (N==64) BLOCKDIM = 256; if (N==216) BLOCKDIM = 864; if (N==343) BLOCKDIM = 686; if (N==512) BLOCKDIM = 1024; gpuIndexPermute12(index, M2*M3, N1, BLOCKDIM/N); gpuPermuteSharedMem(C, Ctmp, index, N*K, BLOCKDIM); // Ctmp := A1*B -> Ctmp[M1xM2xM3xK] := 
A1[M1xN1]*C[N1xM2xM3xK] gpuGEMM(Ctmp, A1, C, M1, M2*M3*K, N1); //gpuGEMM<5>(Ctmp, A1, C, M1, M2*M1*K); // C = permute(Ctmp, [3 2 1 4]) -> C[M3xM2xM1xK] := Ctmp[M1xM2xM3xK] N = M2*M3*M1; if (N<33) BLOCKDIM = N*32; else if (N<65) BLOCKDIM = N*16; else if (N<129) BLOCKDIM = N*8; else if (N<257) BLOCKDIM = N*4; else if (N<513) BLOCKDIM = N*2; else BLOCKDIM = N; if (N==64) BLOCKDIM = 256; if (N==216) BLOCKDIM = 864; if (N==343) BLOCKDIM = 686; if (N==512) BLOCKDIM = 1024; gpuIndexPermute13(index, M1, M2, M3, BLOCKDIM/N); gpuPermuteSharedMem(C, Ctmp, index, N*K, BLOCKDIM); } template <int N1, int N2, int N3, typename T> void gpuTensorGEMM3D(T *C, T *A1, T *A2, T *A3, T *B, T* Ctmp, int *index, int M1, int M2, int M3, int K) { /* B = (A1 x A2 x A3) B INPUT: A1[M1 by N1], A2[M2 by N2], A3[M3 by N3], B[N1 by N2 by N3 by K] OUTPUT: B[M1 by M2 by M3 by K] */ int BLOCKDIM; // Ctmp := A3*B -> Ctmp[M3xN2xN1xK] := A3[M3xN3]*B[N3xN2xN1xK] gpuGEMM<N3>(Ctmp, A3, B, M3, N2*N1*K); // C = permute(Ctmp, [2 1 3]) -> Ctmp[N2xM3xN1*K] := C[M3xN2xN1xK] if (M3*N2<17) BLOCKDIM = M3*N2*32; else BLOCKDIM = M3*N2*16; if (M3*N2==64) BLOCKDIM = 256; gpuIndexPermute12(index, M3, N2, BLOCKDIM/(M3*N2)); gpuPermuteSharedMem(C, Ctmp, index, M3*N2*N1*K, BLOCKDIM); // Ctmp = A2*C -> Ctmp[M2xM3xN1xK] = A2[M2xN2]*C[N2xM3xN1xK] gpuGEMM<N2>(Ctmp, A2, C, M2, M3*N1*K); // C = permute(Ctmp, [2 1 3]) -> C[N1xM2xM3xK] := Ctmp[M2xM3xN1xK] //gpuPermute12(C, Ctmp, M2*M3, N1, K); int N = M2*M3*N1; if (N<33) BLOCKDIM = N*32; else if (N<65) BLOCKDIM = N*16; else if (N<129) BLOCKDIM = N*8; else if (N<257) BLOCKDIM = N*4; else if (N<513) BLOCKDIM = N*2; else BLOCKDIM = N; if (N==64) BLOCKDIM = 256; if (N==216) BLOCKDIM = 864; if (N==343) BLOCKDIM = 686; if (N==512) BLOCKDIM = 1024; gpuIndexPermute12(index, M2*M3, N1, BLOCKDIM/N); gpuPermuteSharedMem(C, Ctmp, index, N*K, BLOCKDIM); // Ctmp := A1*B -> Ctmp[M1xM2xM3xK] := A1[M1xN1]*C[N1xM2xM3xK] gpuGEMM<N1>(Ctmp, A1, C, M1, M2*M3*K); // C = permute(Ctmp, [3 2 1 4]) -> C[M3xM2xM1xK] := Ctmp[M1xM2xM3xK] if (N<33) BLOCKDIM = N*32; else if (N<65) BLOCKDIM = N*16; else if (N<129) BLOCKDIM = N*8; else if (N<257) BLOCKDIM = N*4; else if (N<513) BLOCKDIM = N*2; else BLOCKDIM = N; if (N==64) BLOCKDIM = 256; if (N==216) BLOCKDIM = 864; if (N==343) BLOCKDIM = 686; if (N==512) BLOCKDIM = 1024; gpuIndexPermute13(index, M1, M2, M3, BLOCKDIM/N); gpuPermuteSharedMem(C, Ctmp, index, N*K, BLOCKDIM); } template <typename T> void gpuTensorGEMM(T *C, T *A1, T *A2, T *A3, T *B, T* Ctmp, int *index, int M1, int N1, int M2, int N2, int M3, int N3, int K) { if (M3*N3==0) { gpuTensorGEMM2D(C, A1, A2, B, Ctmp, index, M1, N1, M2, N2, K); /* if ((N1==3) && (N2==3)) gpuTensorGEMM2D<3,3>(C, A1, A2, B, Ctmp, index, M1, M2, K); else if ((N1==4) && (N2==4)) gpuTensorGEMM2D<4,4>(C, A1, A2, B, Ctmp, index, M1, M2, K); else if ((N1==5) && (N2==5)) gpuTensorGEMM2D<5,5>(C, A1, A2, B, Ctmp, index, M1, M2, K); else if ((N1==6) && (N2==6)) gpuTensorGEMM2D<6,6>(C, A1, A2, B, Ctmp, index, M1, M2, K); else if ((N1==7) && (N2==7)) gpuTensorGEMM2D<7,7>(C, A1, A2, B, Ctmp, index, M1, M2, K); else if ((N1==8) && (N2==8)) gpuTensorGEMM2D<8,8>(C, A1, A2, B, Ctmp, index, M1, M2, K); else { } */ } else { gpuTensorGEMM3D(C, A1, A2, A3, B, Ctmp, index, M1, N1, M2, N2, M3, N3, K); /* if ((N1==3) && (N2==3) && (N3==3)) gpuTensorGEMM3D<3,3,3>(C, A1, A2, A3, B, Ctmp, index, M1, M2, M3, K); else if ((N1==4) && (N2==4) && (N3==4)) gpuTensorGEMM3D<4,4,4>(C, A1, A2, A3, B, Ctmp, index, M1, M2, M3, K); else if ((N1==5) && (N2==5) && (N3==5)) 
gpuTensorGEMM3D<5,5,5>(C, A1, A2, A3, B, Ctmp, index, M1, M2, M3, K); else if ((N1==6) && (N2==6) && (N3==6)) gpuTensorGEMM3D<6,6,6>(C, A1, A2, A3, B, Ctmp, index, M1, M2, M3, K); else if ((N1==7) && (N2==7) && (N3==7)) gpuTensorGEMM3D<7,7,7>(C, A1, A2, A3, B, Ctmp, index, M1, M2, M3, K); else if ((N1==8) && (N2==8) && (N3==8)) gpuTensorGEMM3D<8,8,8>(C, A1, A2, A3, B, Ctmp, index, M1, M2, M3, K); else { } */ } } template void gpuGEMM(double*, double*, double*, int, int, int); template void gpuGEMM(float*, float*, float*, int, int, int); template void gpuTensorGEMM2D(double*, double*, double*, double*, double*, int*, int, int, int, int, int); template void gpuTensorGEMM2D(float*, float*, float*, float*, float*, int*, int, int, int, int, int); template void gpuTensorGEMM3D(double*, double*, double*, double*, double*, double*, int*, int, int, int, int, int, int, int); template void gpuTensorGEMM3D(float*, float*, float*, float*, float*, float*, int*, int, int, int, int, int, int, int); template<int> void gpuGEMM(double*, double*, double*, int, int); template<int> void gpuGEMM(float*, float*, float*, int, int); template<int, int> void gpuTensorGEMM2D(double*, double*, double*, double*, double*, int*, int, int, int); template<int, int> void gpuTensorGEMM2D(float*, float*, float*, float*, float*, int*, int, int, int); template<int, int, int> void gpuTensorGEMM3D(double*, double*, double*, double*, double*, double*, int*, int, int, int, int); template<int, int, int> void gpuTensorGEMM3D(float*, float*, float*, float*, float*, float*, int*, int, int, int, int); template void gpuTensorGEMM(double*, double*, double*, double*, double*, double*, int*, int, int, int, int, int, int, int); template void gpuTensorGEMM(float*, float*, float*, float*, float*, float*, int*, int, int, int, int, int, int, int); #endif
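/*
 * A rough host-side usage sketch (not from the original file) for the templated
 * gpuGEMM<K> defined above, which computes column-major C[I x J] = A[I x K] * B[K x J].
 * It assumes compilation in the same translation unit as the file above, together with
 * the helper declarations that file already depends on; J is chosen as a multiple of
 * BLOCKDIMGEMM so every thread of a block runs the kernel's while loop the same number
 * of times. For the .cu counterpart below, the hip* runtime calls become their cuda*
 * equivalents.
 */
#include <cstdio>

int main() {
  const int I = 4, K = 3, J = 256;
  static float hA[I * K], hB[K * J], hC[I * J];
  for (int n = 0; n < I * K; n++) hA[n] = 1.0f;   // A = all ones (column-major I x K)
  for (int n = 0; n < K * J; n++) hB[n] = 2.0f;   // B = all twos (column-major K x J)
  float *dA, *dB, *dC;
  hipMalloc(&dA, sizeof(hA));
  hipMalloc(&dB, sizeof(hB));
  hipMalloc(&dC, sizeof(hC));
  hipMemcpy(dA, hA, sizeof(hA), hipMemcpyHostToDevice);
  hipMemcpy(dB, hB, sizeof(hB), hipMemcpyHostToDevice);
  gpuGEMM<3>(dC, dA, dB, I, J);                   // launches gpuTemplateGEMM<3>
  hipMemcpy(hC, dC, sizeof(hC), hipMemcpyDeviceToHost);
  printf("C(0,0) = %f (expected %f)\n", hC[0], 1.0f * 2.0f * K);  // each entry = K*1*2 = 6
  hipFree(dA); hipFree(dB); hipFree(dC);
  return 0;
}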
4424dbf20ee55f355f937143b2429368edbf84f5.cu
#ifndef __GPUARRAYMULTIPLICATION #define __GPUARRAYMULTIPLICATION #define BLOCKDIMGEMM 256 template <int K, typename T> __global__ void gpuTemplateGEMM(T *C, T *A, T *B, int I, int J) { int tx = threadIdx.x; // block thread index int idx = tx + blockIdx.x * blockDim.x; // global thread index // load data from global memory to shared memory __shared__ T Ashared[BLOCKDIMGEMM]; if (tx<I*K) Ashared[tx] = A[tx]; while (idx<J) { // load data from global memory to shared memory __shared__ T Bshared[K*BLOCKDIMGEMM]; int i, k; int n = K*tx; int p = K*idx; for (k=0; k<K; k++) Bshared[k+n] = B[k+p]; // thread synchronization __syncthreads(); p = I*idx; T Csub; for (i=0; i<I; i++) { Csub = 0.0; for (k=0; k<K; k++) Csub += Ashared[i+I*k]*Bshared[k+n]; C[i+p] = Csub; } idx += blockDim.x * gridDim.x; } } // C[I*J] = A[I*K] x B[K*J] template <int K, typename T> void gpuGEMM(T *C, T *A, T *B, int I, int J) { int gridDim = (J + BLOCKDIMGEMM - 1) / BLOCKDIMGEMM; gridDim = (gridDim>1024)? 1024 : gridDim; gpuTemplateGEMM<K><<<gridDim, BLOCKDIMGEMM>>>(C, A, B, I, J); } template <typename T> __global__ void gpuTemplateSmallSizeGEMM(T *C, T *A, T *B, int I, int J, int K) { int tx = threadIdx.x; // block thread index int idx = tx + blockIdx.x * blockDim.x; // global thread index int i, k, n, p; // load data from global memory to shared memory __shared__ T Ashared[128]; if (tx<I*K) Ashared[tx] = A[tx]; while (idx<J) { // load data from global memory to shared memory __shared__ T Bshared[1024]; n = K*tx; p = K*idx; for (k=0; k<K; k++) Bshared[k+n] = B[k+p]; // thread synchronization __syncthreads(); p = I*idx; T Csub; for (i=0; i<I; i++) { Csub = 0.0; for (k=0; k<K; k++) Csub += Ashared[i+I*k]*Bshared[k+n]; C[i+p] = Csub; } idx += blockDim.x * gridDim.x; } } template<typename T> __global__ void gpuTemplateBigSizeGEMM(T *C, T *A, T *B, int I, int J, int K) { int tx = threadIdx.x; // block thread index int idx = tx + blockIdx.x * blockDim.x; // global thread index int i, k; i = tx; __shared__ T Ashared[1600]; while (i<I*K) { Ashared[i] = A[i]; i = i + blockDim.x; } while (idx<J) { __shared__ T Bshared[2048]; int n = K*tx; int p = K*idx; for (k=0; k<K; k++) Bshared[k+n] = B[k+p]; // thread synchronization __syncthreads(); p = I*idx; T Csub; for (i=0; i<I; i++) { Csub = 0.0; for (k=0; k<K; k++) Csub += Ashared[i+I*k]*Bshared[k+n]; C[i+p] = Csub; } idx += blockDim.x * gridDim.x; } } // C[I*J] = A[I*K] x B[K*J] template <typename T> void gpuGEMM(T *C, T *A, T *B, int I, int J, int K) { if (K<9) { int BLOCKDIM = 256; if (K>4) BLOCKDIM = 128; int gridDim = (J + BLOCKDIM - 1) / BLOCKDIM; gridDim = (gridDim>1024)? 1024 : gridDim; gpuTemplateSmallSizeGEMM<<<gridDim, BLOCKDIM>>>(C, A, B, I, J, K); } else { int BLOCKDIM = 64; if (K>24) BLOCKDIM = 32; int gridDim = (J + BLOCKDIM - 1) / BLOCKDIM; gridDim = (gridDim>1024)? 
1024 : gridDim; gpuTemplateBigSizeGEMM<<<gridDim, BLOCKDIM>>>(C, A, B, I, J, K); } } template <int N1, int N2, typename T> void gpuTensorGEMM2D(T *C, T *A1, T *A2, T *B, T *Ctmp, int *index, int M1, int M2, int K) { /* B = (A1 x A2) B = A2*B*A1' = (A1*(A2*B)')' INPUT: A1[M1 by N1], A2[M2 by N2], B[N1 by N2 by K] OUTPUT: B[M1 by M2 by K] */ int BLOCKDIM; // Ctmp := A2*B -> Ctmp[M2xN1xK] := A2[M2xN2]*B[N2xN1xK] gpuGEMM<N2>(Ctmp, A2, B, M2, N1*K); // C = permute(Ctmp, [2 1 3]) -> C[N1xM2xK] := Ctmp[M2xN1xK] if (M2*N1<17) BLOCKDIM = M2*N1*32; else BLOCKDIM = M2*N1*16; if (M2*N1==64) BLOCKDIM = 256; gpuIndexPermute12(index, M2, N1, BLOCKDIM/(M2*N1)); gpuPermuteSharedMem(C, Ctmp, index, M2*N1*K, BLOCKDIM); // Ctmp = A2*C -> Ctmp[M2xM1xK] = A1[M1xN1]*C[N1xM2xK] gpuGEMM<N1>(Ctmp, A1, C, M1, M2*K); // C = permute(Ctmp, [2 1 3]) -> C[M2xM1xK] := Ctmp[M1xM2xK] if (M2*M1<17) BLOCKDIM = M2*M1*32; else BLOCKDIM = M2*M1*16; if (M2*M1==64) BLOCKDIM = 256; gpuIndexPermute12(index, M2, M1, BLOCKDIM/(M2*M1)); gpuPermuteSharedMem(C, Ctmp, index, M1*M2*K, BLOCKDIM); } template <typename T> void gpuTensorGEMM2D(T *C, T *A1, T *A2, T *B, T *Ctmp, int *index, int M1, int N1, int M2, int N2, int K) { /* B = (A1 x A2) B = A2*B*A1' = (A1*(A2*B)')' INPUT: A1[M1 by N1], A2[M2 by N2], B[N1 by N2 by K] OUTPUT: B[M1 by M2 by K] */ int BLOCKDIM; // Ctmp := A2*B -> Ctmp[M2xN1xK] := A2[M2xN2]*B[N2xN1xK] gpuGEMM(Ctmp, A2, B, M2, N1*K, N2); //gpuGEMM<4>(Ctmp, A2, B, M2, N1*K); // C = permute(Ctmp, [2 1 3]) -> C[N1xM2xK] := Ctmp[M2xN1xK] if (M2*N1<17) BLOCKDIM = M2*N1*32; else BLOCKDIM = M2*N1*16; if (M2*N1==64) BLOCKDIM = 256; gpuIndexPermute12(index, M2, N1, BLOCKDIM/(M2*N1)); gpuPermuteSharedMem(C, Ctmp, index, M2*N1*K, BLOCKDIM); // Ctmp = A2*C -> Ctmp[M2xM1xK] = A1[M1xN1]*C[N1xM2xK] gpuGEMM(Ctmp, A1, C, M1, M2*K, N1); //gpuGEMM<4>(Ctmp, A1, C, M1, M2*K); // C = permute(Ctmp, [2 1 3]) -> C[M2xM1xK] := Ctmp[M1xM2xK] if (M2*M1<17) BLOCKDIM = M2*M1*32; else BLOCKDIM = M2*M1*16; if (M2*M1==64) BLOCKDIM = 256; gpuIndexPermute12(index, M2, M1, BLOCKDIM/(M2*M1)); gpuPermuteSharedMem(C, Ctmp, index, M1*M2*K, BLOCKDIM); } template <typename T> void gpuTensorGEMM3D(T *C, T *A1, T *A2, T *A3, T *B, T* Ctmp, int *index, int M1, int N1, int M2, int N2, int M3, int N3, int K) { /* B = (A1 x A2 x A3) B INPUT: A1[M1 by N1], A2[M2 by N2], A3[M3 by N3], B[N1 by N2 by N3 by K] OUTPUT: B[M1 by M2 by M3 by K] */ int BLOCKDIM; // Ctmp := A3*B -> Ctmp[M3xN2xN1xK] := A3[M3xN3]*B[N3xN2xN1xK] gpuGEMM(Ctmp, A3, B, M3, N2*N1*K, N3); //gpuGEMM<5>(Ctmp, A3, B, M3, N2*N1*K); // C = permute(Ctmp, [2 1 3]) -> Ctmp[N2xM3xN1*K] := C[M3xN2xN1xK] if (M3*N2<17) BLOCKDIM = M3*N2*32; else BLOCKDIM = M3*N2*16; if (M3*N2==64) BLOCKDIM = 256; gpuIndexPermute12(index, M3, N2, BLOCKDIM/(M3*N2)); gpuPermuteSharedMem(C, Ctmp, index, M3*N2*N1*K, BLOCKDIM); // Ctmp = A2*C -> Ctmp[M2xM3xN1xK] = A2[M2xN2]*C[N2xM3xN1xK] gpuGEMM(Ctmp, A2, C, M2, M3*N1*K, N2); //gpuGEMM<5>(Ctmp, A2, C, M2, N3*N1*K); // C = permute(Ctmp, [2 1 3]) -> C[N1xM2xM3xK] := Ctmp[M2xM3xN1xK] //gpuPermute12(C, Ctmp, M2*M3, N1, K); int N = M2*M3*N1; if (N<33) BLOCKDIM = N*32; else if (N<65) BLOCKDIM = N*16; else if (N<129) BLOCKDIM = N*8; else if (N<257) BLOCKDIM = N*4; else if (N<513) BLOCKDIM = N*2; else BLOCKDIM = N; if (N==64) BLOCKDIM = 256; if (N==216) BLOCKDIM = 864; if (N==343) BLOCKDIM = 686; if (N==512) BLOCKDIM = 1024; gpuIndexPermute12(index, M2*M3, N1, BLOCKDIM/N); gpuPermuteSharedMem(C, Ctmp, index, N*K, BLOCKDIM); // Ctmp := A1*B -> Ctmp[M1xM2xM3xK] := A1[M1xN1]*C[N1xM2xM3xK] gpuGEMM(Ctmp, 
A1, C, M1, M2*M3*K, N1); //gpuGEMM<5>(Ctmp, A1, C, M1, M2*M1*K); // C = permute(Ctmp, [3 2 1 4]) -> C[M3xM2xM1xK] := Ctmp[M1xM2xM3xK] N = M2*M3*M1; if (N<33) BLOCKDIM = N*32; else if (N<65) BLOCKDIM = N*16; else if (N<129) BLOCKDIM = N*8; else if (N<257) BLOCKDIM = N*4; else if (N<513) BLOCKDIM = N*2; else BLOCKDIM = N; if (N==64) BLOCKDIM = 256; if (N==216) BLOCKDIM = 864; if (N==343) BLOCKDIM = 686; if (N==512) BLOCKDIM = 1024; gpuIndexPermute13(index, M1, M2, M3, BLOCKDIM/N); gpuPermuteSharedMem(C, Ctmp, index, N*K, BLOCKDIM); } template <int N1, int N2, int N3, typename T> void gpuTensorGEMM3D(T *C, T *A1, T *A2, T *A3, T *B, T* Ctmp, int *index, int M1, int M2, int M3, int K) { /* B = (A1 x A2 x A3) B INPUT: A1[M1 by N1], A2[M2 by N2], A3[M3 by N3], B[N1 by N2 by N3 by K] OUTPUT: B[M1 by M2 by M3 by K] */ int BLOCKDIM; // Ctmp := A3*B -> Ctmp[M3xN2xN1xK] := A3[M3xN3]*B[N3xN2xN1xK] gpuGEMM<N3>(Ctmp, A3, B, M3, N2*N1*K); // C = permute(Ctmp, [2 1 3]) -> Ctmp[N2xM3xN1*K] := C[M3xN2xN1xK] if (M3*N2<17) BLOCKDIM = M3*N2*32; else BLOCKDIM = M3*N2*16; if (M3*N2==64) BLOCKDIM = 256; gpuIndexPermute12(index, M3, N2, BLOCKDIM/(M3*N2)); gpuPermuteSharedMem(C, Ctmp, index, M3*N2*N1*K, BLOCKDIM); // Ctmp = A2*C -> Ctmp[M2xM3xN1xK] = A2[M2xN2]*C[N2xM3xN1xK] gpuGEMM<N2>(Ctmp, A2, C, M2, M3*N1*K); // C = permute(Ctmp, [2 1 3]) -> C[N1xM2xM3xK] := Ctmp[M2xM3xN1xK] //gpuPermute12(C, Ctmp, M2*M3, N1, K); int N = M2*M3*N1; if (N<33) BLOCKDIM = N*32; else if (N<65) BLOCKDIM = N*16; else if (N<129) BLOCKDIM = N*8; else if (N<257) BLOCKDIM = N*4; else if (N<513) BLOCKDIM = N*2; else BLOCKDIM = N; if (N==64) BLOCKDIM = 256; if (N==216) BLOCKDIM = 864; if (N==343) BLOCKDIM = 686; if (N==512) BLOCKDIM = 1024; gpuIndexPermute12(index, M2*M3, N1, BLOCKDIM/N); gpuPermuteSharedMem(C, Ctmp, index, N*K, BLOCKDIM); // Ctmp := A1*B -> Ctmp[M1xM2xM3xK] := A1[M1xN1]*C[N1xM2xM3xK] gpuGEMM<N1>(Ctmp, A1, C, M1, M2*M3*K); // C = permute(Ctmp, [3 2 1 4]) -> C[M3xM2xM1xK] := Ctmp[M1xM2xM3xK] if (N<33) BLOCKDIM = N*32; else if (N<65) BLOCKDIM = N*16; else if (N<129) BLOCKDIM = N*8; else if (N<257) BLOCKDIM = N*4; else if (N<513) BLOCKDIM = N*2; else BLOCKDIM = N; if (N==64) BLOCKDIM = 256; if (N==216) BLOCKDIM = 864; if (N==343) BLOCKDIM = 686; if (N==512) BLOCKDIM = 1024; gpuIndexPermute13(index, M1, M2, M3, BLOCKDIM/N); gpuPermuteSharedMem(C, Ctmp, index, N*K, BLOCKDIM); } template <typename T> void gpuTensorGEMM(T *C, T *A1, T *A2, T *A3, T *B, T* Ctmp, int *index, int M1, int N1, int M2, int N2, int M3, int N3, int K) { if (M3*N3==0) { gpuTensorGEMM2D(C, A1, A2, B, Ctmp, index, M1, N1, M2, N2, K); /* if ((N1==3) && (N2==3)) gpuTensorGEMM2D<3,3>(C, A1, A2, B, Ctmp, index, M1, M2, K); else if ((N1==4) && (N2==4)) gpuTensorGEMM2D<4,4>(C, A1, A2, B, Ctmp, index, M1, M2, K); else if ((N1==5) && (N2==5)) gpuTensorGEMM2D<5,5>(C, A1, A2, B, Ctmp, index, M1, M2, K); else if ((N1==6) && (N2==6)) gpuTensorGEMM2D<6,6>(C, A1, A2, B, Ctmp, index, M1, M2, K); else if ((N1==7) && (N2==7)) gpuTensorGEMM2D<7,7>(C, A1, A2, B, Ctmp, index, M1, M2, K); else if ((N1==8) && (N2==8)) gpuTensorGEMM2D<8,8>(C, A1, A2, B, Ctmp, index, M1, M2, K); else { } */ } else { gpuTensorGEMM3D(C, A1, A2, A3, B, Ctmp, index, M1, N1, M2, N2, M3, N3, K); /* if ((N1==3) && (N2==3) && (N3==3)) gpuTensorGEMM3D<3,3,3>(C, A1, A2, A3, B, Ctmp, index, M1, M2, M3, K); else if ((N1==4) && (N2==4) && (N3==4)) gpuTensorGEMM3D<4,4,4>(C, A1, A2, A3, B, Ctmp, index, M1, M2, M3, K); else if ((N1==5) && (N2==5) && (N3==5)) gpuTensorGEMM3D<5,5,5>(C, A1, A2, A3, B, Ctmp, index, M1, 
M2, M3, K); else if ((N1==6) && (N2==6) && (N3==6)) gpuTensorGEMM3D<6,6,6>(C, A1, A2, A3, B, Ctmp, index, M1, M2, M3, K); else if ((N1==7) && (N2==7) && (N3==7)) gpuTensorGEMM3D<7,7,7>(C, A1, A2, A3, B, Ctmp, index, M1, M2, M3, K); else if ((N1==8) && (N2==8) && (N3==8)) gpuTensorGEMM3D<8,8,8>(C, A1, A2, A3, B, Ctmp, index, M1, M2, M3, K); else { } */ } } template void gpuGEMM(double*, double*, double*, int, int, int); template void gpuGEMM(float*, float*, float*, int, int, int); template void gpuTensorGEMM2D(double*, double*, double*, double*, double*, int*, int, int, int, int, int); template void gpuTensorGEMM2D(float*, float*, float*, float*, float*, int*, int, int, int, int, int); template void gpuTensorGEMM3D(double*, double*, double*, double*, double*, double*, int*, int, int, int, int, int, int, int); template void gpuTensorGEMM3D(float*, float*, float*, float*, float*, float*, int*, int, int, int, int, int, int, int); template<int> void gpuGEMM(double*, double*, double*, int, int); template<int> void gpuGEMM(float*, float*, float*, int, int); template<int, int> void gpuTensorGEMM2D(double*, double*, double*, double*, double*, int*, int, int, int); template<int, int> void gpuTensorGEMM2D(float*, float*, float*, float*, float*, int*, int, int, int); template<int, int, int> void gpuTensorGEMM3D(double*, double*, double*, double*, double*, double*, int*, int, int, int, int); template<int, int, int> void gpuTensorGEMM3D(float*, float*, float*, float*, float*, float*, int*, int, int, int, int); template void gpuTensorGEMM(double*, double*, double*, double*, double*, double*, int*, int, int, int, int, int, int, int); template void gpuTensorGEMM(float*, float*, float*, float*, float*, float*, int*, int, int, int, int, int, int, int); #endif
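// --- Illustrative reference (sketch): the column-major contraction computed by gpuGEMM ---
// The kernels above index A as A[i + I*k], B as B[k + K*j] and C as C[i + I*j],
// i.e. a column-major product C[I x J] = A[I x K] * B[K x J]. A naive host
// reference of that product is useful for validating the kernels; the name
// cpuGemmReference is introduced here for illustration only.
template <typename T>
void cpuGemmReference(T* C, const T* A, const T* B, int I, int J, int K) {
    for (int j = 0; j < J; ++j)
        for (int i = 0; i < I; ++i) {
            T sum = 0;
            for (int k = 0; k < K; ++k)
                sum += A[i + I * k] * B[k + K * j];   // same indexing as Ashared/Bshared above
            C[i + I * j] = sum;
        }
}
// The tensor routines chain this contraction with index permutations: each step
// applies one small factor (A3, then A2, then A1) to the leading mode of the
// tensor, and gpuPermuteSharedMem rotates the next mode into the leading position.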
058ce41f97b5d1ced179fe825a02a1e327746a68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // write your code into this file // your kernels can be implemented directly here, or included // function solveGPU is a device function: it can allocate memory, call CUDA kernels etc. #define DEBUGNUMFPU 32 #define BLOCK 128 __global__ void GPUiter(const int* const contacts, const int* const in, int* const infections, const int n, const int iter, int* const out){ __shared__ int neighborhood[3][BLOCK+3]; int x = (blockIdx.x * (blockDim.x * 2)) + threadIdx.x; int y = blockIdx.y; int maxIdx = min(BLOCK, n-(blockIdx.x * (blockDim.x * 2))); int pos = y*n + x; int offset = maxIdx >> 1; if(x + offset < n && y < n){ if(threadIdx.x == 0){ neighborhood[0][0] = x != 0 && y != 0 ? in[(y-1)*n + (x-1)] : 0; neighborhood[1][0] = x != 0 ? in[pos-1] : 0; neighborhood[2][0] = x != 0 && y < n - 1 ? in[(y+1)*n + (x-1)] : 0; } if(threadIdx.x + offset == maxIdx-1){ if(maxIdx == BLOCK){ neighborhood[0][maxIdx + 1] = blockIdx.x < ceil((float)n/BLOCK) - 1 && blockIdx.y != 0 ? in[(y-1) * n + (x + 1 + offset)] : 0; neighborhood[1][maxIdx + 1] = blockIdx.x < ceil((float)n/BLOCK) - 1 ? in[y * n + (x + 1 + offset)] : 0; neighborhood[2][maxIdx + 1] = blockIdx.y < n - 1 && blockIdx.x < ceil((float)n/BLOCK) - 1 ? in[(y+1)*n + (x + 1 + offset)] : 0; } else{ // maxIdx is less than BLOCK (ie N == 160) neighborhood[0][maxIdx + 1] = 0; neighborhood[1][maxIdx + 1] = 0; neighborhood[2][maxIdx + 1] = 0; } } neighborhood[0][threadIdx.x + 1] = blockIdx.y != 0 ? in[(y-1)*n + x] : 0; neighborhood[0][threadIdx.x + 1 + offset] = blockIdx.y != 0 ? in[(y-1)*n + x + offset] : 0; neighborhood[1][threadIdx.x + 1] = in[pos]; neighborhood[1][threadIdx.x + 1 + offset] = in[pos + offset]; neighborhood[2][threadIdx.x + 1] = blockIdx.y < n - 1 ? in[(y+1)*n + x] : 0; neighborhood[2][threadIdx.x + 1 + offset] = blockIdx.y < n - 1 ? in[(y+1)*n + x + offset] : 0; } __syncthreads(); if(x + offset < n && y < n){ int in_pos = neighborhood[1][threadIdx.x + 1]; if (in_pos > 0) { out[pos] = in_pos - 1 == 0 ? -30 : in_pos - 1; } if (in_pos < 0) { out[pos] = in_pos + 1; } if (in_pos == 0) { int infected = 0; infected += (neighborhood[0][threadIdx.x] > 0) ? 1 : 0; infected += (neighborhood[0][threadIdx.x + 1] > 0) ? 1 : 0; infected += (neighborhood[0][threadIdx.x + 2] > 0) ? 1 : 0; infected += (neighborhood[1][threadIdx.x] > 0) ? 1 : 0; infected += (neighborhood[1][threadIdx.x + 2] > 0) ? 1 : 0; infected += (neighborhood[2][threadIdx.x] > 0) ? 1 : 0; infected += (neighborhood[2][threadIdx.x + 1] > 0) ? 1 : 0; infected += (neighborhood[2][threadIdx.x + 2] > 0) ? 1 : 0; if (infected > contacts[pos]) { out[pos] = 10; atomicAdd(&infections[iter], 1); } else{ out[pos] = 0; } } int in_pos2 = neighborhood[1][threadIdx.x + 1 + offset]; if(in_pos2 > 0){ out[pos + offset] = in_pos2 - 1 == 0 ? -30 : in_pos2 - 1; } if(in_pos2 < 0){ out[pos + offset] = in_pos2 + 1; } if (in_pos2 == 0) { int infected2 = 0; infected2 += (neighborhood[0][threadIdx.x + offset] > 0) ? 1 : 0; infected2 += (neighborhood[0][threadIdx.x + 1 + offset] > 0) ? 1 : 0; infected2 += (neighborhood[0][threadIdx.x + 2 + offset] > 0) ? 1 : 0; infected2 += (neighborhood[1][threadIdx.x + offset] > 0) ? 1 : 0; infected2 += (neighborhood[1][threadIdx.x + 2 + offset] > 0) ? 1 : 0; infected2 += (neighborhood[2][threadIdx.x + offset] > 0) ? 1 : 0; infected2 += (neighborhood[2][threadIdx.x + 1 + offset] > 0) ? 1 : 0; infected2 += (neighborhood[2][threadIdx.x + 2 + offset] > 0) ? 
1 : 0; if (infected2 > contacts[pos + offset]) { out[pos + offset] = 10; atomicAdd(&infections[iter], 1); } else{ out[pos + offset] = 0; } } } } void solveGPU(const int* const contacts, int* const town, int* const infections, const int n, const int iters) { int* in = town; int* out; if(hipMalloc((void**)&out, n * n * sizeof(out[0])) != hipSuccess){ fprintf(stderr, "CudaMalloc failed ...\n"); return; } dim3 gridSize; dim3 blockSize; // If N is less than block, we reduce the amount of threads per block if(n < BLOCK){ gridSize.x = 1; gridSize.y = n; gridSize.z = 1; blockSize.x = n >> 1; blockSize.y = 1; blockSize.z = 1; } else{ gridSize.x = ceil((float)n/BLOCK); gridSize.y = n; gridSize.z = 1; blockSize.x = BLOCK >> 1; blockSize.y = 1; blockSize.z = 1; } printf("GridSize: %d %d \n", gridSize.x, gridSize.y); printf("BlockSize: %d %d \n", blockSize.x, blockSize.y); for(int i = 0; i < iters; i++){ hipLaunchKernelGGL(( GPUiter), dim3(gridSize), dim3(blockSize), 0, 0, contacts, in, infections, n, i, out); int* tmp = in; in = out; out = tmp; } if (in != town) { hipMemcpy(town, in, n * n * sizeof(town[0]), hipMemcpyDeviceToDevice); hipFree(in); } else { hipFree(out); } }
058ce41f97b5d1ced179fe825a02a1e327746a68.cu
// write your code into this file // your kernels can be implemented directly here, or included // function solveGPU is a device function: it can allocate memory, call CUDA kernels etc. #define DEBUGNUMFPU 32 #define BLOCK 128 __global__ void GPUiter(const int* const contacts, const int* const in, int* const infections, const int n, const int iter, int* const out){ __shared__ int neighborhood[3][BLOCK+3]; int x = (blockIdx.x * (blockDim.x * 2)) + threadIdx.x; int y = blockIdx.y; int maxIdx = min(BLOCK, n-(blockIdx.x * (blockDim.x * 2))); int pos = y*n + x; int offset = maxIdx >> 1; if(x + offset < n && y < n){ if(threadIdx.x == 0){ neighborhood[0][0] = x != 0 && y != 0 ? in[(y-1)*n + (x-1)] : 0; neighborhood[1][0] = x != 0 ? in[pos-1] : 0; neighborhood[2][0] = x != 0 && y < n - 1 ? in[(y+1)*n + (x-1)] : 0; } if(threadIdx.x + offset == maxIdx-1){ if(maxIdx == BLOCK){ neighborhood[0][maxIdx + 1] = blockIdx.x < ceil((float)n/BLOCK) - 1 && blockIdx.y != 0 ? in[(y-1) * n + (x + 1 + offset)] : 0; neighborhood[1][maxIdx + 1] = blockIdx.x < ceil((float)n/BLOCK) - 1 ? in[y * n + (x + 1 + offset)] : 0; neighborhood[2][maxIdx + 1] = blockIdx.y < n - 1 && blockIdx.x < ceil((float)n/BLOCK) - 1 ? in[(y+1)*n + (x + 1 + offset)] : 0; } else{ // maxIdx is less than BLOCK (ie N == 160) neighborhood[0][maxIdx + 1] = 0; neighborhood[1][maxIdx + 1] = 0; neighborhood[2][maxIdx + 1] = 0; } } neighborhood[0][threadIdx.x + 1] = blockIdx.y != 0 ? in[(y-1)*n + x] : 0; neighborhood[0][threadIdx.x + 1 + offset] = blockIdx.y != 0 ? in[(y-1)*n + x + offset] : 0; neighborhood[1][threadIdx.x + 1] = in[pos]; neighborhood[1][threadIdx.x + 1 + offset] = in[pos + offset]; neighborhood[2][threadIdx.x + 1] = blockIdx.y < n - 1 ? in[(y+1)*n + x] : 0; neighborhood[2][threadIdx.x + 1 + offset] = blockIdx.y < n - 1 ? in[(y+1)*n + x + offset] : 0; } __syncthreads(); if(x + offset < n && y < n){ int in_pos = neighborhood[1][threadIdx.x + 1]; if (in_pos > 0) { out[pos] = in_pos - 1 == 0 ? -30 : in_pos - 1; } if (in_pos < 0) { out[pos] = in_pos + 1; } if (in_pos == 0) { int infected = 0; infected += (neighborhood[0][threadIdx.x] > 0) ? 1 : 0; infected += (neighborhood[0][threadIdx.x + 1] > 0) ? 1 : 0; infected += (neighborhood[0][threadIdx.x + 2] > 0) ? 1 : 0; infected += (neighborhood[1][threadIdx.x] > 0) ? 1 : 0; infected += (neighborhood[1][threadIdx.x + 2] > 0) ? 1 : 0; infected += (neighborhood[2][threadIdx.x] > 0) ? 1 : 0; infected += (neighborhood[2][threadIdx.x + 1] > 0) ? 1 : 0; infected += (neighborhood[2][threadIdx.x + 2] > 0) ? 1 : 0; if (infected > contacts[pos]) { out[pos] = 10; atomicAdd(&infections[iter], 1); } else{ out[pos] = 0; } } int in_pos2 = neighborhood[1][threadIdx.x + 1 + offset]; if(in_pos2 > 0){ out[pos + offset] = in_pos2 - 1 == 0 ? -30 : in_pos2 - 1; } if(in_pos2 < 0){ out[pos + offset] = in_pos2 + 1; } if (in_pos2 == 0) { int infected2 = 0; infected2 += (neighborhood[0][threadIdx.x + offset] > 0) ? 1 : 0; infected2 += (neighborhood[0][threadIdx.x + 1 + offset] > 0) ? 1 : 0; infected2 += (neighborhood[0][threadIdx.x + 2 + offset] > 0) ? 1 : 0; infected2 += (neighborhood[1][threadIdx.x + offset] > 0) ? 1 : 0; infected2 += (neighborhood[1][threadIdx.x + 2 + offset] > 0) ? 1 : 0; infected2 += (neighborhood[2][threadIdx.x + offset] > 0) ? 1 : 0; infected2 += (neighborhood[2][threadIdx.x + 1 + offset] > 0) ? 1 : 0; infected2 += (neighborhood[2][threadIdx.x + 2 + offset] > 0) ? 
1 : 0; if (infected2 > contacts[pos + offset]) { out[pos + offset] = 10; atomicAdd(&infections[iter], 1); } else{ out[pos + offset] = 0; } } } } void solveGPU(const int* const contacts, int* const town, int* const infections, const int n, const int iters) { int* in = town; int* out; if(cudaMalloc((void**)&out, n * n * sizeof(out[0])) != cudaSuccess){ fprintf(stderr, "CudaMalloc failed ...\n"); return; } dim3 gridSize; dim3 blockSize; // If N is less than block, we reduce the amount of threads per block if(n < BLOCK){ gridSize.x = 1; gridSize.y = n; gridSize.z = 1; blockSize.x = n >> 1; blockSize.y = 1; blockSize.z = 1; } else{ gridSize.x = ceil((float)n/BLOCK); gridSize.y = n; gridSize.z = 1; blockSize.x = BLOCK >> 1; blockSize.y = 1; blockSize.z = 1; } printf("GridSize: %d %d \n", gridSize.x, gridSize.y); printf("BlockSize: %d %d \n", blockSize.x, blockSize.y); for(int i = 0; i < iters; i++){ GPUiter<<<gridSize, blockSize>>>(contacts, in, infections, n, i, out); int* tmp = in; in = out; out = tmp; } if (in != town) { cudaMemcpy(town, in, n * n * sizeof(town[0]), cudaMemcpyDeviceToDevice); cudaFree(in); } else { cudaFree(out); } }
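// --- Illustrative host harness for solveGPU (sketch) ---
// A minimal way to drive solveGPU above; the harness below is an assumption
// introduced for illustration (the framework that normally calls solveGPU is
// not shown here). GPUiter accumulates per-iteration counts with atomicAdd,
// so the infections buffer must be zero-initialised before the call.
#include <cuda_runtime.h>
#include <vector>

void runSimulationSketch(const std::vector<int>& h_contacts,
                         std::vector<int>& h_town,
                         std::vector<int>& h_infections,
                         int n, int iters) {
    int *d_contacts = nullptr, *d_town = nullptr, *d_infections = nullptr;
    cudaMalloc(&d_contacts, n * n * sizeof(int));
    cudaMalloc(&d_town, n * n * sizeof(int));
    cudaMalloc(&d_infections, iters * sizeof(int));
    cudaMemcpy(d_contacts, h_contacts.data(), n * n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_town, h_town.data(), n * n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_infections, 0, iters * sizeof(int));

    solveGPU(d_contacts, d_town, d_infections, n, iters);   // result is copied back into d_town
    cudaDeviceSynchronize();

    h_infections.resize(iters);
    cudaMemcpy(h_town.data(), d_town, n * n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_infections.data(), d_infections, iters * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_contacts);
    cudaFree(d_town);
    cudaFree(d_infections);
}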
d7e64dc86e93bfed4d440464a344f41c11a4d389.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "updateState.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *B = NULL; hipMalloc(&B, XSIZE*YSIZE); float *external = NULL; hipMalloc(&external, XSIZE*YSIZE); int dim = 2; float timestep = 1; int length = 1; float L = 1; float M = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( updateState), dim3(gridBlock),dim3(threadBlock), 0, 0, B,external,dim,timestep,length,L,M); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( updateState), dim3(gridBlock),dim3(threadBlock), 0, 0, B,external,dim,timestep,length,L,M); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( updateState), dim3(gridBlock),dim3(threadBlock), 0, 0, B,external,dim,timestep,length,L,M); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d7e64dc86e93bfed4d440464a344f41c11a4d389.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "updateState.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *B = NULL; cudaMalloc(&B, XSIZE*YSIZE); float *external = NULL; cudaMalloc(&external, XSIZE*YSIZE); int dim = 2; float timestep = 1; int length = 1; float L = 1; float M = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); updateState<<<gridBlock,threadBlock>>>(B,external,dim,timestep,length,L,M); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { updateState<<<gridBlock,threadBlock>>>(B,external,dim,timestep,length,L,M); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { updateState<<<gridBlock,threadBlock>>>(B,external,dim,timestep,length,L,M); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
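// --- Alternative timing with CUDA events (sketch) ---
// The steady_clock loop above does not synchronise before taking the end
// timestamp, so it largely measures kernel launch/enqueue overhead rather than
// execution time. A common alternative is to bracket the launches with CUDA
// events; the helper below is an assumption for illustration, with the
// updateState parameter types inferred from the call site above.
#include <cuda_runtime.h>

float timeUpdateStateWithEvents(dim3 gridBlock, dim3 threadBlock,
                                float* B, float* external, int dim,
                                float timestep, int length, float L, float M,
                                int repeats) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < repeats; ++i)
        updateState<<<gridBlock, threadBlock>>>(B, external, dim, timestep, length, L, M);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);               // wait for all repeats to finish on the GPU
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);   // elapsed time between the two events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms / repeats;                      // average milliseconds per launch
}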
a304633725a53e82d738dc4c5d7e398fa06fe5d1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <vector> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/common/bfloat16.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/common/float16.h" #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/kernels/funcs/blas/blas.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/math_function_impl.h" namespace phi { namespace funcs { // The following part of the code refers to NVIDIA-cutlass // https://github.com/NVIDIA/cutlass/blob/master/tools/util/include/cutlass/util/device_nchw_to_nhwc.h // Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights // reserved. SPDX-License-Identifier: BSD-3-Clause template <typename T> __global__ void batch_transpose_kernel(T* output, const T* input, const int batch, const int M, const int N, int swizzle) { const int num = M * N; // "+1" to avoid smem bank conflict __shared__ T shbuf[32 * (32 + 1)]; const int32_t tid = threadIdx.y * blockDim.x + threadIdx.x; const int32_t wid = tid / 32; const int32_t lid = tid % 32; const int32_t batch_i = blockIdx.z; const int32_t mi0 = (blockIdx.y * swizzle + blockIdx.x % swizzle) * 32; const int32_t ni0 = blockIdx.x / swizzle * 32; const size_t input_idx = batch_i * num + (mi0 + wid) * N + ni0; const T* A = input + input_idx; if (ni0 + lid < N) { const int lid_x_33 = lid * 33; if ((mi0 + 32) <= M) { int mi = wid; // between 0 and 7 #pragma unroll for (int mLoopIdx = 0; mLoopIdx < 4; mLoopIdx++) { shbuf[lid_x_33 + mi] = A[lid]; A = &A[8 * N]; mi += 8; } } else { for (int mi = wid; mi < 32; mi += 8) { if ((mi + mi0) < M) { shbuf[lid_x_33 + mi] = A[lid]; } A = &A[8 * N]; } } } __syncthreads(); const int32_t miOut = mi0 + lid; output = &output[batch_i * num + miOut]; if (miOut < M) { if (ni0 + 32 < N) { int nI = wid; #pragma unroll for (int nLoopIdx = 0; nLoopIdx < 4; ++nLoopIdx) { output[(ni0 + nI) * M] = shbuf[(nI)*33 + lid]; nI += 8; } } else { for (int nI = wid; nI < 32; nI += 8) { if (ni0 + nI < N) { output[(ni0 + nI) * M] = shbuf[(nI)*33 + lid]; } } } } } template <typename T> void BatchTranspose(T* output, const T* input, int64_t batch, int64_t m, int64_t n, const phi::GPUContext* dev_ctx) { int64_t device_id = dev_ctx->GetPlace().GetDeviceId(); const auto& prop = phi::backends::gpu::GetDeviceProperties(device_id); int max_grid_y = prop.maxGridSize[1]; int64_t input_num = batch * m * n; if (input_num >= std::numeric_limits<int>::max()) { PADDLE_THROW(phi::errors::Unimplemented( "Unsupported input size, batch: %ld,m: %ld, n: %ld", batch, m, n)); } dim3 logical_grid((n + 31) / 32, (m + 31) / 32, batch); dim3 block(32, 8); // we set swizzle to 2 default. 
int swizzle = (logical_grid.y + max_grid_y - 1) / max_grid_y; swizzle = ::max(swizzle, 2); dim3 physical_grid(logical_grid.x * swizzle, (logical_grid.y + swizzle - 1) / swizzle, batch); hipLaunchKernelGGL(( batch_transpose_kernel), dim3(physical_grid), dim3(block), 0, 0, output, input, batch, m, n, swizzle); } using float16 = phi::dtype::float16; using bfloat16 = phi::dtype::bfloat16; template void BatchTranspose(float16* output, const float16* input, int64_t batch, int64_t m, int64_t n, const phi::GPUContext* dev_ctx); template void BatchTranspose(float* output, const float* input, int64_t batch, int64_t m, int64_t n, const phi::GPUContext* dev_ctx); template void BatchTranspose(bfloat16* output, const bfloat16* input, int64_t batch, int64_t m, int64_t n, const phi::GPUContext* dev_ctx); template struct SetConstant<phi::GPUContext, float16>; template struct SetConstant<phi::GPUContext, bfloat16>; template struct SetConstant<phi::GPUContext, float>; template struct SetConstant<phi::GPUContext, double>; template struct SetConstant<phi::GPUContext, uint8_t>; template struct SetConstant<phi::GPUContext, int>; template struct SetConstant<phi::GPUContext, int16_t>; template struct SetConstant<phi::GPUContext, int64_t>; template struct SetConstant<phi::GPUContext, bool>; template struct SetConstant<phi::GPUContext, phi::dtype::complex<float>>; template struct SetConstant<phi::GPUContext, phi::dtype::complex<double>>; template struct SetConstant<phi::GPUPinnedContext, float16>; template struct SetConstant<phi::GPUPinnedContext, bfloat16>; template struct SetConstant<phi::GPUPinnedContext, float>; template struct SetConstant<phi::GPUPinnedContext, double>; template struct SetConstant<phi::GPUPinnedContext, uint8_t>; template struct SetConstant<phi::GPUPinnedContext, int>; template struct SetConstant<phi::GPUPinnedContext, int16_t>; template struct SetConstant<phi::GPUPinnedContext, int64_t>; template struct SetConstant<phi::GPUPinnedContext, bool>; template struct SetConstant<phi::GPUPinnedContext, phi::dtype::complex<float>>; template struct SetConstant<phi::GPUPinnedContext, phi::dtype::complex<double>>; #define DEFINE_GPU_TRANS(RANK) \ template struct Transpose<phi::GPUContext, bool, RANK>; \ template struct Transpose<phi::GPUContext, unsigned char, RANK>; \ template struct Transpose<phi::GPUContext, float, RANK>; \ template struct Transpose<phi::GPUContext, double, RANK>; \ template struct Transpose<phi::GPUContext, float16, RANK>; \ template struct Transpose<phi::GPUContext, bfloat16, RANK>; \ template struct Transpose<phi::GPUContext, int8_t, RANK>; \ template struct Transpose<phi::GPUContext, int16_t, RANK>; \ template struct Transpose<phi::GPUContext, int32_t, RANK>; \ template struct Transpose<phi::GPUContext, int64_t, RANK>; \ template struct Transpose<phi::GPUContext, \ phi::dtype::complex<float>, \ RANK>; \ template struct Transpose<phi::GPUContext, phi::dtype::complex<double>, RANK>; DEFINE_GPU_TRANS(1); DEFINE_GPU_TRANS(2); DEFINE_GPU_TRANS(3); DEFINE_GPU_TRANS(4); DEFINE_GPU_TRANS(5); DEFINE_GPU_TRANS(6); #define REINTERPRET(T, DST_PTR, SRC_PTR) \ T* DST_PTR = reinterpret_cast<T*>(SRC_PTR) template <typename T> __global__ void TransposeNormalKernel(const T* in_ptr, T* out_ptr, int64_t element, const int64_t* in_stride_ptr, const int64_t* out_stride_ptr, const int64_t* axis_ptr, int rank) { CUDA_KERNEL_LOOP(out_idx, element) { int64_t in_idx = 0; int64_t tmp_idx = out_idx; for (int i = 0; i < rank; ++i) { const int64_t coordinate = tmp_idx / out_stride_ptr[i]; tmp_idx -= coordinate * 
out_stride_ptr[i]; in_idx += coordinate * in_stride_ptr[axis_ptr[i]]; } out_ptr[out_idx] = in_ptr[in_idx]; } } template <typename DeviceContext, typename T> void TransposeNormal<DeviceContext, T>::operator()( const DeviceContext& context, const phi::DenseTensor& in, phi::DenseTensor* out, const std::vector<int>& axis) { const int rank = axis.size(); auto in_stride = phi::stride(in.dims()); auto out_stride = phi::stride(out->dims()); auto* in_ptr = in.data<T>(); auto* out_ptr = out->data<T>(); // copy in_stride, out_stride, axis to gpu device const phi::GPUPlace& cuda_place = context.GetPlace(); phi::CPUPlace cpu_place = phi::CPUPlace(); size_t size = 3 * rank * sizeof(int64_t); auto cpu_buf_holder = phi::memory_utils::Alloc(cpu_place, size); auto cuda_buf_holder = phi::memory_utils::Alloc(cuda_place, size); REINTERPRET(int64_t, cpu_buf, cpu_buf_holder->ptr()); REINTERPRET(int64_t, cuda_buf, cuda_buf_holder->ptr()); for (int i = 0; i < rank; ++i) { cpu_buf[i] = in_stride[i]; cpu_buf[rank + i] = out_stride[i]; cpu_buf[2 * rank + i] = axis[i]; } memory_utils::Copy( cuda_place, cuda_buf, cpu_place, cpu_buf, size, context.stream()); REINTERPRET(const int64_t, in_stride_ptr, cuda_buf); REINTERPRET(const int64_t, out_stride_ptr, cuda_buf + rank); REINTERPRET(const int64_t, axis_ptr, cuda_buf + 2 * rank); const int MAX_BLOCK_DIM = context.GetMaxThreadsPerBlock(); const int MAX_GRID_DIM = context.GetMaxPhysicalThreadCount() / MAX_BLOCK_DIM; int64_t elements = in.numel(); int block_size = (elements >= MAX_BLOCK_DIM) ? MAX_BLOCK_DIM : (1 << static_cast<int>(std::log2(elements))); int grid_size = elements / block_size; grid_size = (grid_size >= MAX_GRID_DIM) ? MAX_GRID_DIM : grid_size; hipLaunchKernelGGL(( TransposeNormalKernel<T>), dim3(grid_size), dim3(block_size), 0, context.stream(), in_ptr, out_ptr, elements, in_stride_ptr, out_stride_ptr, axis_ptr, rank); } template <typename T> struct TransposeNormal<phi::GPUContext, T> { void operator()(const phi::GPUContext& context, const DenseTensor& in, DenseTensor* out, const std::vector<int>& axis) { const int rank = axis.size(); auto in_stride = stride(in.dims()); auto out_stride = stride(out->dims()); auto* in_ptr = in.data<T>(); auto* out_ptr = out->data<T>(); // copy in_stride, out_stride, axis to gpu device const phi::GPUPlace& cuda_place = context.GetPlace(); phi::CPUPlace cpu_place = phi::CPUPlace(); size_t size = 3 * rank * sizeof(int64_t); auto cpu_buf_holder = phi::memory_utils::Alloc(cpu_place, size); auto cuda_buf_holder = phi::memory_utils::Alloc(cuda_place, size); REINTERPRET(int64_t, cpu_buf, cpu_buf_holder->ptr()); REINTERPRET(int64_t, cuda_buf, cuda_buf_holder->ptr()); for (int i = 0; i < rank; ++i) { cpu_buf[i] = in_stride[i]; cpu_buf[rank + i] = out_stride[i]; cpu_buf[2 * rank + i] = axis[i]; } memory_utils::Copy( cuda_place, cuda_buf, cpu_place, cpu_buf, size, context.stream()); REINTERPRET(const int64_t, in_stride_ptr, cuda_buf); REINTERPRET(const int64_t, out_stride_ptr, cuda_buf + rank); REINTERPRET(const int64_t, axis_ptr, cuda_buf + 2 * rank); const int MAX_BLOCK_DIM = context.GetMaxThreadsPerBlock(); const int MAX_GRID_DIM = context.GetMaxPhysicalThreadCount() / MAX_BLOCK_DIM; int64_t elements = in.numel(); int block_size = (elements >= MAX_BLOCK_DIM) ? MAX_BLOCK_DIM : (1 << static_cast<int>(std::log2(elements))); int grid_size = elements / block_size; grid_size = (grid_size >= MAX_GRID_DIM) ? 
MAX_GRID_DIM : grid_size; hipLaunchKernelGGL(( TransposeNormalKernel<T>) , dim3(grid_size), dim3(block_size), 0, context.stream(), in_ptr, out_ptr, elements, in_stride_ptr, out_stride_ptr, axis_ptr, rank); } }; // define transpose normal #define DEFINE_GPU_TRANS_NORMAL(TYPE) \ template struct TransposeNormal<phi::GPUContext, TYPE> DEFINE_GPU_TRANS_NORMAL(float16); DEFINE_GPU_TRANS_NORMAL(bfloat16); DEFINE_GPU_TRANS_NORMAL(float); DEFINE_GPU_TRANS_NORMAL(double); DEFINE_GPU_TRANS_NORMAL(int); DEFINE_GPU_TRANS_NORMAL(int64_t); DEFINE_GPU_TRANS_NORMAL(bool); DEFINE_GPU_TRANS_NORMAL(int16_t); DEFINE_GPU_TRANS_NORMAL(uint8_t); DEFINE_GPU_TRANS_NORMAL(int8_t); DEFINE_GPU_TRANS_NORMAL(phi::dtype::complex<float>); DEFINE_GPU_TRANS_NORMAL(phi::dtype::complex<double>); struct TensorSetConstantGPU { TensorSetConstantGPU(const phi::DeviceContext& context, phi::DenseTensor* tensor, float value) : context_(context), tensor_(tensor), value_(value) {} template <typename T> void apply() const { SetConstant<phi::GPUContext, T> functor; functor(reinterpret_cast<const phi::GPUContext&>(context_), tensor_, static_cast<T>(value_)); } const phi::DeviceContext& context_; phi::DenseTensor* tensor_; float value_; }; template <> void set_constant_with_place<phi::GPUPlace>(const phi::DeviceContext& context, phi::DenseTensor* tensor, float value) { phi::VisitDataType(tensor->dtype(), TensorSetConstantGPU(context, tensor, value)); } template <typename T> __global__ void RowwiseAddKernel( const T* a, const T* b, T* c, int width, int num) { T tmp = 1.0 / width; CUDA_KERNEL_LOOP(i, num) { int h = i * tmp; int w = i - h * width; c[i] = a[i] + b[w]; } } template <typename T> struct RowwiseAdd<phi::GPUContext, T> { void operator()(const phi::GPUContext& context, const phi::DenseTensor& input, const phi::DenseTensor& vector, phi::DenseTensor* output) { auto in_dims = input.dims(); auto out_dims = output->dims(); auto size = input.numel() / in_dims[0]; PADDLE_ENFORCE_EQ( vector.numel(), size, phi::errors::InvalidArgument( "The input vector size" " should be equal to the size of each row of input tensor." " Expected vector size=%d, but received %d", size, vector.numel())); const char* in_dims_cstr = in_dims.to_str().c_str(); const char* out_dims_cstr = out_dims.to_str().c_str(); PADDLE_ENFORCE_EQ( out_dims, in_dims, phi::errors::InvalidArgument( "The output tensor shape should be same as the input tensor" " shape. Expected output tensor shape: %s," " but received %s", in_dims_cstr, out_dims_cstr)); int blocks = 512; int grids = (input.numel() + blocks - 1) / blocks; hipLaunchKernelGGL(( RowwiseAddKernel<T>), dim3(grids), dim3(blocks), 0, context.stream(), input.data<T>(), vector.data<T>(), output->data<T>(), static_cast<int>(in_dims[1]), static_cast<int>(input.numel())); } }; template struct RowwiseAdd<phi::GPUContext, float>; template struct RowwiseAdd<phi::GPUContext, double>; template struct ColwiseSum<phi::GPUContext, float>; template struct ColwiseSum<phi::GPUContext, int>; template struct ColwiseSum<phi::GPUContext, int64_t>; // template struct ColwiseSum<phi::GPUContext, double>; // The ColwiseSum<phi::GPUContext, double> failed in debug // mode, // and only failed for this case. So reimplemented it. 
template <> void ColwiseSum<phi::GPUContext, double>::operator()( const phi::GPUContext& context, const phi::DenseTensor& input, phi::DenseTensor* vector) { auto in_dims = input.dims(); auto size = input.numel() / in_dims[0]; PADDLE_ENFORCE_EQ(vector->numel(), size, phi::errors::InvalidArgument( "The size of input vector" " should be equal to the size of input tensor column" " dimension. Expected vector size=%d, but received %d", size, vector->numel())); phi::DenseTensor one; one.Resize({in_dims[0]}); context.template Alloc<double>(&one); SetConstant<phi::GPUContext, double> set; set(context, &one, static_cast<double>(1.0)); phi::funcs::GetBlas<phi::GPUContext, double>(context).GEMV( true, static_cast<int>(in_dims[0]), static_cast<int>(in_dims[1]), 1.0, input.data<double>(), one.data<double>(), 0.0, vector->data<double>()); } template struct RowwiseSum<phi::GPUContext, float>; // template struct RowwiseSum<phi::GPUContext, double>; // TODO(zcd): Following ColwiseSum format, need to confirm. // The RowwiseSum<phi::GPUContext, double> failed in debug // mode, // and only failed for this case. So reimplemented it. template <> void RowwiseSum<phi::GPUContext, double>::operator()( const phi::GPUContext& context, const phi::DenseTensor& input, phi::DenseTensor* vector) { auto in_dims = input.dims(); auto size = input.numel() / in_dims[0]; PADDLE_ENFORCE_EQ(vector->numel(), in_dims[0], phi::errors::InvalidArgument( "The size of input vector" " should be equal to the size of input tensor row" " dimension. Expected vector size=%d, but received %d", in_dims[0], vector->numel())); phi::DenseTensor one; one.Resize({size}); context.template Alloc<double>(&one); SetConstant<phi::GPUContext, double> set; set(context, &one, static_cast<double>(1.0)); phi::funcs::GetBlas<phi::GPUContext, double>(context).GEMV( true, static_cast<int>(in_dims[1]), static_cast<int>(in_dims[0]), 1.0, one.data<double>(), input.data<double>(), 0.0, vector->data<double>()); } template struct RowwiseMean<phi::GPUContext, float>; template struct RowwiseMean<phi::GPUContext, double>; } // namespace funcs } // namespace phi
a304633725a53e82d738dc4c5d7e398fa06fe5d1.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <vector> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/common/bfloat16.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/common/float16.h" #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/kernels/funcs/blas/blas.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/math_function_impl.h" namespace phi { namespace funcs { // The following part of the code refers to NVIDIA-cutlass // https://github.com/NVIDIA/cutlass/blob/master/tools/util/include/cutlass/util/device_nchw_to_nhwc.h // Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights // reserved. SPDX-License-Identifier: BSD-3-Clause template <typename T> __global__ void batch_transpose_kernel(T* output, const T* input, const int batch, const int M, const int N, int swizzle) { const int num = M * N; // "+1" to avoid smem bank conflict __shared__ T shbuf[32 * (32 + 1)]; const int32_t tid = threadIdx.y * blockDim.x + threadIdx.x; const int32_t wid = tid / 32; const int32_t lid = tid % 32; const int32_t batch_i = blockIdx.z; const int32_t mi0 = (blockIdx.y * swizzle + blockIdx.x % swizzle) * 32; const int32_t ni0 = blockIdx.x / swizzle * 32; const size_t input_idx = batch_i * num + (mi0 + wid) * N + ni0; const T* A = input + input_idx; if (ni0 + lid < N) { const int lid_x_33 = lid * 33; if ((mi0 + 32) <= M) { int mi = wid; // between 0 and 7 #pragma unroll for (int mLoopIdx = 0; mLoopIdx < 4; mLoopIdx++) { shbuf[lid_x_33 + mi] = A[lid]; A = &A[8 * N]; mi += 8; } } else { for (int mi = wid; mi < 32; mi += 8) { if ((mi + mi0) < M) { shbuf[lid_x_33 + mi] = A[lid]; } A = &A[8 * N]; } } } __syncthreads(); const int32_t miOut = mi0 + lid; output = &output[batch_i * num + miOut]; if (miOut < M) { if (ni0 + 32 < N) { int nI = wid; #pragma unroll for (int nLoopIdx = 0; nLoopIdx < 4; ++nLoopIdx) { output[(ni0 + nI) * M] = shbuf[(nI)*33 + lid]; nI += 8; } } else { for (int nI = wid; nI < 32; nI += 8) { if (ni0 + nI < N) { output[(ni0 + nI) * M] = shbuf[(nI)*33 + lid]; } } } } } template <typename T> void BatchTranspose(T* output, const T* input, int64_t batch, int64_t m, int64_t n, const phi::GPUContext* dev_ctx) { int64_t device_id = dev_ctx->GetPlace().GetDeviceId(); const auto& prop = phi::backends::gpu::GetDeviceProperties(device_id); int max_grid_y = prop.maxGridSize[1]; int64_t input_num = batch * m * n; if (input_num >= std::numeric_limits<int>::max()) { PADDLE_THROW(phi::errors::Unimplemented( "Unsupported input size, batch: %ld,m: %ld, n: %ld", batch, m, n)); } dim3 logical_grid((n + 31) / 32, (m + 31) / 32, batch); dim3 block(32, 8); // we set swizzle to 2 default. 
int swizzle = (logical_grid.y + max_grid_y - 1) / max_grid_y; swizzle = std::max(swizzle, 2); dim3 physical_grid(logical_grid.x * swizzle, (logical_grid.y + swizzle - 1) / swizzle, batch); batch_transpose_kernel<<<physical_grid, block>>>( output, input, batch, m, n, swizzle); } using float16 = phi::dtype::float16; using bfloat16 = phi::dtype::bfloat16; template void BatchTranspose(float16* output, const float16* input, int64_t batch, int64_t m, int64_t n, const phi::GPUContext* dev_ctx); template void BatchTranspose(float* output, const float* input, int64_t batch, int64_t m, int64_t n, const phi::GPUContext* dev_ctx); template void BatchTranspose(bfloat16* output, const bfloat16* input, int64_t batch, int64_t m, int64_t n, const phi::GPUContext* dev_ctx); template struct SetConstant<phi::GPUContext, float16>; template struct SetConstant<phi::GPUContext, bfloat16>; template struct SetConstant<phi::GPUContext, float>; template struct SetConstant<phi::GPUContext, double>; template struct SetConstant<phi::GPUContext, uint8_t>; template struct SetConstant<phi::GPUContext, int>; template struct SetConstant<phi::GPUContext, int16_t>; template struct SetConstant<phi::GPUContext, int64_t>; template struct SetConstant<phi::GPUContext, bool>; template struct SetConstant<phi::GPUContext, phi::dtype::complex<float>>; template struct SetConstant<phi::GPUContext, phi::dtype::complex<double>>; template struct SetConstant<phi::GPUPinnedContext, float16>; template struct SetConstant<phi::GPUPinnedContext, bfloat16>; template struct SetConstant<phi::GPUPinnedContext, float>; template struct SetConstant<phi::GPUPinnedContext, double>; template struct SetConstant<phi::GPUPinnedContext, uint8_t>; template struct SetConstant<phi::GPUPinnedContext, int>; template struct SetConstant<phi::GPUPinnedContext, int16_t>; template struct SetConstant<phi::GPUPinnedContext, int64_t>; template struct SetConstant<phi::GPUPinnedContext, bool>; template struct SetConstant<phi::GPUPinnedContext, phi::dtype::complex<float>>; template struct SetConstant<phi::GPUPinnedContext, phi::dtype::complex<double>>; #define DEFINE_GPU_TRANS(RANK) \ template struct Transpose<phi::GPUContext, bool, RANK>; \ template struct Transpose<phi::GPUContext, unsigned char, RANK>; \ template struct Transpose<phi::GPUContext, float, RANK>; \ template struct Transpose<phi::GPUContext, double, RANK>; \ template struct Transpose<phi::GPUContext, float16, RANK>; \ template struct Transpose<phi::GPUContext, bfloat16, RANK>; \ template struct Transpose<phi::GPUContext, int8_t, RANK>; \ template struct Transpose<phi::GPUContext, int16_t, RANK>; \ template struct Transpose<phi::GPUContext, int32_t, RANK>; \ template struct Transpose<phi::GPUContext, int64_t, RANK>; \ template struct Transpose<phi::GPUContext, \ phi::dtype::complex<float>, \ RANK>; \ template struct Transpose<phi::GPUContext, phi::dtype::complex<double>, RANK>; DEFINE_GPU_TRANS(1); DEFINE_GPU_TRANS(2); DEFINE_GPU_TRANS(3); DEFINE_GPU_TRANS(4); DEFINE_GPU_TRANS(5); DEFINE_GPU_TRANS(6); #define REINTERPRET(T, DST_PTR, SRC_PTR) \ T* DST_PTR = reinterpret_cast<T*>(SRC_PTR) template <typename T> __global__ void TransposeNormalKernel(const T* in_ptr, T* out_ptr, int64_t element, const int64_t* in_stride_ptr, const int64_t* out_stride_ptr, const int64_t* axis_ptr, int rank) { CUDA_KERNEL_LOOP(out_idx, element) { int64_t in_idx = 0; int64_t tmp_idx = out_idx; for (int i = 0; i < rank; ++i) { const int64_t coordinate = tmp_idx / out_stride_ptr[i]; tmp_idx -= coordinate * out_stride_ptr[i]; in_idx += 
coordinate * in_stride_ptr[axis_ptr[i]]; } out_ptr[out_idx] = in_ptr[in_idx]; } } template <typename DeviceContext, typename T> void TransposeNormal<DeviceContext, T>::operator()( const DeviceContext& context, const phi::DenseTensor& in, phi::DenseTensor* out, const std::vector<int>& axis) { const int rank = axis.size(); auto in_stride = phi::stride(in.dims()); auto out_stride = phi::stride(out->dims()); auto* in_ptr = in.data<T>(); auto* out_ptr = out->data<T>(); // copy in_stride, out_stride, axis to gpu device const phi::GPUPlace& cuda_place = context.GetPlace(); phi::CPUPlace cpu_place = phi::CPUPlace(); size_t size = 3 * rank * sizeof(int64_t); auto cpu_buf_holder = phi::memory_utils::Alloc(cpu_place, size); auto cuda_buf_holder = phi::memory_utils::Alloc(cuda_place, size); REINTERPRET(int64_t, cpu_buf, cpu_buf_holder->ptr()); REINTERPRET(int64_t, cuda_buf, cuda_buf_holder->ptr()); for (int i = 0; i < rank; ++i) { cpu_buf[i] = in_stride[i]; cpu_buf[rank + i] = out_stride[i]; cpu_buf[2 * rank + i] = axis[i]; } memory_utils::Copy( cuda_place, cuda_buf, cpu_place, cpu_buf, size, context.stream()); REINTERPRET(const int64_t, in_stride_ptr, cuda_buf); REINTERPRET(const int64_t, out_stride_ptr, cuda_buf + rank); REINTERPRET(const int64_t, axis_ptr, cuda_buf + 2 * rank); const int MAX_BLOCK_DIM = context.GetMaxThreadsPerBlock(); const int MAX_GRID_DIM = context.GetMaxPhysicalThreadCount() / MAX_BLOCK_DIM; int64_t elements = in.numel(); int block_size = (elements >= MAX_BLOCK_DIM) ? MAX_BLOCK_DIM : (1 << static_cast<int>(std::log2(elements))); int grid_size = elements / block_size; grid_size = (grid_size >= MAX_GRID_DIM) ? MAX_GRID_DIM : grid_size; TransposeNormalKernel<T><<<grid_size, block_size, 0, context.stream()>>>( in_ptr, out_ptr, elements, in_stride_ptr, out_stride_ptr, axis_ptr, rank); } template <typename T> struct TransposeNormal<phi::GPUContext, T> { void operator()(const phi::GPUContext& context, const DenseTensor& in, DenseTensor* out, const std::vector<int>& axis) { const int rank = axis.size(); auto in_stride = stride(in.dims()); auto out_stride = stride(out->dims()); auto* in_ptr = in.data<T>(); auto* out_ptr = out->data<T>(); // copy in_stride, out_stride, axis to gpu device const phi::GPUPlace& cuda_place = context.GetPlace(); phi::CPUPlace cpu_place = phi::CPUPlace(); size_t size = 3 * rank * sizeof(int64_t); auto cpu_buf_holder = phi::memory_utils::Alloc(cpu_place, size); auto cuda_buf_holder = phi::memory_utils::Alloc(cuda_place, size); REINTERPRET(int64_t, cpu_buf, cpu_buf_holder->ptr()); REINTERPRET(int64_t, cuda_buf, cuda_buf_holder->ptr()); for (int i = 0; i < rank; ++i) { cpu_buf[i] = in_stride[i]; cpu_buf[rank + i] = out_stride[i]; cpu_buf[2 * rank + i] = axis[i]; } memory_utils::Copy( cuda_place, cuda_buf, cpu_place, cpu_buf, size, context.stream()); REINTERPRET(const int64_t, in_stride_ptr, cuda_buf); REINTERPRET(const int64_t, out_stride_ptr, cuda_buf + rank); REINTERPRET(const int64_t, axis_ptr, cuda_buf + 2 * rank); const int MAX_BLOCK_DIM = context.GetMaxThreadsPerBlock(); const int MAX_GRID_DIM = context.GetMaxPhysicalThreadCount() / MAX_BLOCK_DIM; int64_t elements = in.numel(); int block_size = (elements >= MAX_BLOCK_DIM) ? MAX_BLOCK_DIM : (1 << static_cast<int>(std::log2(elements))); int grid_size = elements / block_size; grid_size = (grid_size >= MAX_GRID_DIM) ? 
MAX_GRID_DIM : grid_size; TransposeNormalKernel<T> <<<grid_size, block_size, 0, context.stream()>>>(in_ptr, out_ptr, elements, in_stride_ptr, out_stride_ptr, axis_ptr, rank); } }; // define transpose normal #define DEFINE_GPU_TRANS_NORMAL(TYPE) \ template struct TransposeNormal<phi::GPUContext, TYPE> DEFINE_GPU_TRANS_NORMAL(float16); DEFINE_GPU_TRANS_NORMAL(bfloat16); DEFINE_GPU_TRANS_NORMAL(float); DEFINE_GPU_TRANS_NORMAL(double); DEFINE_GPU_TRANS_NORMAL(int); DEFINE_GPU_TRANS_NORMAL(int64_t); DEFINE_GPU_TRANS_NORMAL(bool); DEFINE_GPU_TRANS_NORMAL(int16_t); DEFINE_GPU_TRANS_NORMAL(uint8_t); DEFINE_GPU_TRANS_NORMAL(int8_t); DEFINE_GPU_TRANS_NORMAL(phi::dtype::complex<float>); DEFINE_GPU_TRANS_NORMAL(phi::dtype::complex<double>); struct TensorSetConstantGPU { TensorSetConstantGPU(const phi::DeviceContext& context, phi::DenseTensor* tensor, float value) : context_(context), tensor_(tensor), value_(value) {} template <typename T> void apply() const { SetConstant<phi::GPUContext, T> functor; functor(reinterpret_cast<const phi::GPUContext&>(context_), tensor_, static_cast<T>(value_)); } const phi::DeviceContext& context_; phi::DenseTensor* tensor_; float value_; }; template <> void set_constant_with_place<phi::GPUPlace>(const phi::DeviceContext& context, phi::DenseTensor* tensor, float value) { phi::VisitDataType(tensor->dtype(), TensorSetConstantGPU(context, tensor, value)); } template <typename T> __global__ void RowwiseAddKernel( const T* a, const T* b, T* c, int width, int num) { T tmp = 1.0 / width; CUDA_KERNEL_LOOP(i, num) { int h = i * tmp; int w = i - h * width; c[i] = a[i] + b[w]; } } template <typename T> struct RowwiseAdd<phi::GPUContext, T> { void operator()(const phi::GPUContext& context, const phi::DenseTensor& input, const phi::DenseTensor& vector, phi::DenseTensor* output) { auto in_dims = input.dims(); auto out_dims = output->dims(); auto size = input.numel() / in_dims[0]; PADDLE_ENFORCE_EQ( vector.numel(), size, phi::errors::InvalidArgument( "The input vector size" " should be equal to the size of each row of input tensor." " Expected vector size=%d, but received %d", size, vector.numel())); const char* in_dims_cstr = in_dims.to_str().c_str(); const char* out_dims_cstr = out_dims.to_str().c_str(); PADDLE_ENFORCE_EQ( out_dims, in_dims, phi::errors::InvalidArgument( "The output tensor shape should be same as the input tensor" " shape. Expected output tensor shape: %s," " but received %s", in_dims_cstr, out_dims_cstr)); int blocks = 512; int grids = (input.numel() + blocks - 1) / blocks; RowwiseAddKernel<T><<<grids, blocks, 0, context.stream()>>>( input.data<T>(), vector.data<T>(), output->data<T>(), static_cast<int>(in_dims[1]), static_cast<int>(input.numel())); } }; template struct RowwiseAdd<phi::GPUContext, float>; template struct RowwiseAdd<phi::GPUContext, double>; template struct ColwiseSum<phi::GPUContext, float>; template struct ColwiseSum<phi::GPUContext, int>; template struct ColwiseSum<phi::GPUContext, int64_t>; // template struct ColwiseSum<phi::GPUContext, double>; // The ColwiseSum<phi::GPUContext, double> failed in debug // mode, // and only failed for this case. So reimplemented it. 
template <> void ColwiseSum<phi::GPUContext, double>::operator()( const phi::GPUContext& context, const phi::DenseTensor& input, phi::DenseTensor* vector) { auto in_dims = input.dims(); auto size = input.numel() / in_dims[0]; PADDLE_ENFORCE_EQ(vector->numel(), size, phi::errors::InvalidArgument( "The size of input vector" " should be equal to the size of input tensor column" " dimension. Expected vector size=%d, but received %d", size, vector->numel())); phi::DenseTensor one; one.Resize({in_dims[0]}); context.template Alloc<double>(&one); SetConstant<phi::GPUContext, double> set; set(context, &one, static_cast<double>(1.0)); phi::funcs::GetBlas<phi::GPUContext, double>(context).GEMV( true, static_cast<int>(in_dims[0]), static_cast<int>(in_dims[1]), 1.0, input.data<double>(), one.data<double>(), 0.0, vector->data<double>()); } template struct RowwiseSum<phi::GPUContext, float>; // template struct RowwiseSum<phi::GPUContext, double>; // TODO(zcd): Following ColwiseSum format, need to confirm. // The RowwiseSum<phi::GPUContext, double> failed in debug // mode, // and only failed for this case. So reimplemented it. template <> void RowwiseSum<phi::GPUContext, double>::operator()( const phi::GPUContext& context, const phi::DenseTensor& input, phi::DenseTensor* vector) { auto in_dims = input.dims(); auto size = input.numel() / in_dims[0]; PADDLE_ENFORCE_EQ(vector->numel(), in_dims[0], phi::errors::InvalidArgument( "The size of input vector" " should be equal to the size of input tensor row" " dimension. Expected vector size=%d, but received %d", in_dims[0], vector->numel())); phi::DenseTensor one; one.Resize({size}); context.template Alloc<double>(&one); SetConstant<phi::GPUContext, double> set; set(context, &one, static_cast<double>(1.0)); phi::funcs::GetBlas<phi::GPUContext, double>(context).GEMV( true, static_cast<int>(in_dims[1]), static_cast<int>(in_dims[0]), 1.0, one.data<double>(), input.data<double>(), 0.0, vector->data<double>()); } template struct RowwiseMean<phi::GPUContext, float>; template struct RowwiseMean<phi::GPUContext, double>; } // namespace funcs } // namespace phi
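// --- Shared-memory padding illustration (sketch) ---
// batch_transpose_kernel above declares its tile as shbuf[32 * (32 + 1)]; the
// extra element per row spreads column accesses across different shared-memory
// banks. A minimal standalone kernel showing the same trick (names and layout
// below are assumptions for illustration, not part of the Paddle source):
#include <cuda_runtime.h>

#define TRANSPOSE_TILE 32

__global__ void tiledTransposeSketch(float* out, const float* in, int rows, int cols) {
  // launch with block(TRANSPOSE_TILE, TRANSPOSE_TILE) and
  // grid((cols + TRANSPOSE_TILE - 1) / TRANSPOSE_TILE,
  //      (rows + TRANSPOSE_TILE - 1) / TRANSPOSE_TILE)
  __shared__ float tile[TRANSPOSE_TILE][TRANSPOSE_TILE + 1];  // +1 column avoids bank conflicts
  int x = blockIdx.x * TRANSPOSE_TILE + threadIdx.x;          // input column
  int y = blockIdx.y * TRANSPOSE_TILE + threadIdx.y;          // input row
  if (x < cols && y < rows) tile[threadIdx.y][threadIdx.x] = in[y * cols + x];
  __syncthreads();
  x = blockIdx.y * TRANSPOSE_TILE + threadIdx.x;              // output column (= input row index)
  y = blockIdx.x * TRANSPOSE_TILE + threadIdx.y;              // output row (= input column index)
  if (x < rows && y < cols) out[y * rows + x] = tile[threadIdx.x][threadIdx.y];
}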
f19bacc502b519b8ef2f178df6ddec30efb35c5a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "GaussianDevice.h" #include "Gaussian.h" #include "CudaFuncs.h" #include "device_launch_parameters.h" #include<iostream> #include <algorithm> #include <string> __global__ void gausEliminate_v2(float* mat, float* b, float* column_k, int rows, int cols, int k) { __shared__ float row_k[threads2D]; __shared__ float col_k[threads2D]; __shared__ float pivot; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = k + blockIdx.x * blockDim.x + threadIdx.x; //Fetch data: // Let first warp fetch row: if (threadIdx.y == 0 && col <= cols) row_k[threadIdx.x] = col == cols ? b[k] : mat[k * cols + col]; // Let second warp fetch column: else if (threadIdx.y == 1 && blockIdx.y * blockDim.y + threadIdx.x < rows) col_k[threadIdx.x] = column_k[blockIdx.y * blockDim.y + threadIdx.x]; //Let third warp fetch pivot element: else if (threadIdx.y == 2 && threadIdx.x == 0) pivot = mat[k*cols + k]; syncthreads(); if (col <= cols && row < rows) { if (row != k) { // Find the value of L_(i,j) related to this thread: float elim = col_k[threadIdx.y] / pivot * row_k[threadIdx.x]; //printf("(%d, %d, %f, %f), ", row, col, col_k[threadIdx.y], row_k[threadIdx.x]); if (col == cols) //The last thread column is used to update the vector! b[row] -= elim; else { //printf("i,e: %d, %f\n", block + threadIdx.y * cols + threadIdx.x, elim); mat[(blockIdx.y * blockDim.y + threadIdx.y) * cols + blockIdx.x * blockDim.x + threadIdx.x + k] -= elim; } } } } /* Sovle the remaining diagonal matrix! */ __global__ void solve(float* mat, float* b, float* x, int rows, int cols) { int n = blockIdx.x*threads1D + threadIdx.x; if (n < rows) //Ensure bounds x[n] = b[n] / mat[n * cols + n]; } Vector gaussSolveCudaDevice(Matrix& mat, Vector& b) { int n = mat.row; Vector x(n); //Allocate float* dev_mat = genCpy(mat.arr.get(), mat.col*mat.row); float* dev_b = genCpy(b.arr.get(), n); float* dev_col = allocFloat(mat.row); float* dev_x = allocFloat(n); if (dev_mat && dev_b && dev_x) { dim3 block1D(threads1D, 1, 1); dim3 block2D(threads2D, threads2D, 1); //Execute: for (int k = 0; k < n; k++) { hipLaunchKernelGGL(( greatestRowK) , dim3(1),dim3(block1D), 0, 0, dev_mat, mat.row, mat.col, k); dim3 gridSwap(div_ceil(mat.col+1,threads1D), 1, 1); swapRow << <gridSwap, block1D >> > (dev_mat, dev_b, dev_col, mat.row, mat.col, k); dim3 gridElim(div_ceil(mat.col - k + 1,threads2D), div_ceil(mat.row, threads2D), 1); hipLaunchKernelGGL(( gausEliminate_v2), dim3(gridElim),dim3(block2D), 0, 0, dev_mat, dev_b, dev_col, mat.row, mat.col, k); #ifdef DEBUG read(dev_mat, mat.arr.get(), mat.col*mat.row); read(dev_b, b.arr.get(), n); print(mat); print(b); #endif } dim3 gridSolve(div_ceil(mat.col, threads1D), 1, 1); hipLaunchKernelGGL(( solve), dim3(gridSolve), dim3(block1D), 0, 0, dev_mat, dev_b, dev_x, mat.row, mat.col); //Fetch data read(dev_x, x.arr.get(), n); read(dev_mat, mat.arr.get(), mat.col*mat.row); } hipFree(dev_mat); hipFree(dev_b); hipFree(dev_col); hipFree(dev_x); return x; } #pragma region Tests /* Memory test: * Access column major mem. per warp instead of in one warp. * Conclussion: No difference... 
*/ __global__ void singleWarpTransaction(float* arr, int rows, int cols) { __shared__ float shared[64]; int block = blockIdx.y * blockDim.y * cols + blockIdx.x * blockDim.x; if (threadIdx.y < 2) shared[threadIdx.x + blockDim.x * threadIdx.y] = arr[block + threadIdx.x*cols]; syncthreads(); //Repeat local over block y: arr[block + threadIdx.y * cols + threadIdx.x] = shared[threadIdx.x] + shared[blockDim.x + threadIdx.y]; } __global__ void perWarpTransaction(float* arr, int rows, int cols) { __shared__ float shared[64]; int block = blockIdx.y * blockDim.y * cols + blockIdx.x * blockDim.x; if (threadIdx.x == 0 || threadIdx.y == 0) shared[threadIdx.x + threadIdx.y + blockDim.y * (threadIdx.y > 0)] = arr[block + threadIdx.y*cols + threadIdx.x]; shared[blockDim.x] = shared[0]; syncthreads(); //Repeat local over block y: arr[block + threadIdx.y * cols + threadIdx.x] = shared[threadIdx.x] + shared[blockDim.x + threadIdx.y]; } const int size = 16384 * 2; void singleWarpTransaction() { int arr_size = size * size; float* arr = new float[arr_size]; float* dev_arr = allocFloat(arr_size); if (dev_arr) { //Execute: dim3 block(32, 32, 1); dim3 grid(size / 32, size / 32, 1); singleWarpTransaction<<<grid, block >> > (dev_arr, size, size); } read(dev_arr, arr, arr_size); hipFree(dev_arr); delete[] arr; } void perWarpTransaction() { int arr_size = size * size; float* arr = new float[arr_size]; float* dev_arr = allocFloat(arr_size); if (dev_arr) { //Execute: dim3 block(32, 32, 1); dim3 grid(size/32, size/32, 1); perWarpTransaction << <grid, block >> > (dev_arr, size, size); } read(dev_arr, arr, arr_size); hipFree(dev_arr); delete[] arr; } #pragma endregion
f19bacc502b519b8ef2f178df6ddec30efb35c5a.cu
#include "GaussianDevice.h" #include "Gaussian.h" #include "CudaFuncs.h" #include "device_launch_parameters.h" #include<iostream> #include <algorithm> #include <string> __global__ void gausEliminate_v2(float* mat, float* b, float* column_k, int rows, int cols, int k) { __shared__ float row_k[threads2D]; __shared__ float col_k[threads2D]; __shared__ float pivot; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = k + blockIdx.x * blockDim.x + threadIdx.x; //Fetch data: // Let first warp fetch row: if (threadIdx.y == 0 && col <= cols) row_k[threadIdx.x] = col == cols ? b[k] : mat[k * cols + col]; // Let second warp fetch column: else if (threadIdx.y == 1 && blockIdx.y * blockDim.y + threadIdx.x < rows) col_k[threadIdx.x] = column_k[blockIdx.y * blockDim.y + threadIdx.x]; //Let third warp fetch pivot element: else if (threadIdx.y == 2 && threadIdx.x == 0) pivot = mat[k*cols + k]; syncthreads(); if (col <= cols && row < rows) { if (row != k) { // Find the value of L_(i,j) related to this thread: float elim = col_k[threadIdx.y] / pivot * row_k[threadIdx.x]; //printf("(%d, %d, %f, %f), ", row, col, col_k[threadIdx.y], row_k[threadIdx.x]); if (col == cols) //The last thread column is used to update the vector! b[row] -= elim; else { //printf("i,e: %d, %f\n", block + threadIdx.y * cols + threadIdx.x, elim); mat[(blockIdx.y * blockDim.y + threadIdx.y) * cols + blockIdx.x * blockDim.x + threadIdx.x + k] -= elim; } } } } /* Sovle the remaining diagonal matrix! */ __global__ void solve(float* mat, float* b, float* x, int rows, int cols) { int n = blockIdx.x*threads1D + threadIdx.x; if (n < rows) //Ensure bounds x[n] = b[n] / mat[n * cols + n]; } Vector gaussSolveCudaDevice(Matrix& mat, Vector& b) { int n = mat.row; Vector x(n); //Allocate float* dev_mat = genCpy(mat.arr.get(), mat.col*mat.row); float* dev_b = genCpy(b.arr.get(), n); float* dev_col = allocFloat(mat.row); float* dev_x = allocFloat(n); if (dev_mat && dev_b && dev_x) { dim3 block1D(threads1D, 1, 1); dim3 block2D(threads2D, threads2D, 1); //Execute: for (int k = 0; k < n; k++) { greatestRowK <<<1,block1D>>> (dev_mat, mat.row, mat.col, k); dim3 gridSwap(div_ceil(mat.col+1,threads1D), 1, 1); swapRow << <gridSwap, block1D >> > (dev_mat, dev_b, dev_col, mat.row, mat.col, k); dim3 gridElim(div_ceil(mat.col - k + 1,threads2D), div_ceil(mat.row, threads2D), 1); gausEliminate_v2<<<gridElim,block2D>>>(dev_mat, dev_b, dev_col, mat.row, mat.col, k); #ifdef DEBUG read(dev_mat, mat.arr.get(), mat.col*mat.row); read(dev_b, b.arr.get(), n); print(mat); print(b); #endif } dim3 gridSolve(div_ceil(mat.col, threads1D), 1, 1); solve<<<gridSolve, block1D>>> (dev_mat, dev_b, dev_x, mat.row, mat.col); //Fetch data read(dev_x, x.arr.get(), n); read(dev_mat, mat.arr.get(), mat.col*mat.row); } cudaFree(dev_mat); cudaFree(dev_b); cudaFree(dev_col); cudaFree(dev_x); return x; } #pragma region Tests /* Memory test: * Access column major mem. per warp instead of in one warp. * Conclussion: No difference... 
*/ __global__ void singleWarpTransaction(float* arr, int rows, int cols) { __shared__ float shared[64]; int block = blockIdx.y * blockDim.y * cols + blockIdx.x * blockDim.x; if (threadIdx.y < 2) shared[threadIdx.x + blockDim.x * threadIdx.y] = arr[block + threadIdx.x*cols]; syncthreads(); //Repeat local over block y: arr[block + threadIdx.y * cols + threadIdx.x] = shared[threadIdx.x] + shared[blockDim.x + threadIdx.y]; } __global__ void perWarpTransaction(float* arr, int rows, int cols) { __shared__ float shared[64]; int block = blockIdx.y * blockDim.y * cols + blockIdx.x * blockDim.x; if (threadIdx.x == 0 || threadIdx.y == 0) shared[threadIdx.x + threadIdx.y + blockDim.y * (threadIdx.y > 0)] = arr[block + threadIdx.y*cols + threadIdx.x]; shared[blockDim.x] = shared[0]; syncthreads(); //Repeat local over block y: arr[block + threadIdx.y * cols + threadIdx.x] = shared[threadIdx.x] + shared[blockDim.x + threadIdx.y]; } const int size = 16384 * 2; void singleWarpTransaction() { int arr_size = size * size; float* arr = new float[arr_size]; float* dev_arr = allocFloat(arr_size); if (dev_arr) { //Execute: dim3 block(32, 32, 1); dim3 grid(size / 32, size / 32, 1); singleWarpTransaction<<<grid, block >> > (dev_arr, size, size); } read(dev_arr, arr, arr_size); cudaFree(dev_arr); delete[] arr; } void perWarpTransaction() { int arr_size = size * size; float* arr = new float[arr_size]; float* dev_arr = allocFloat(arr_size); if (dev_arr) { //Execute: dim3 block(32, 32, 1); dim3 grid(size/32, size/32, 1); perWarpTransaction << <grid, block >> > (dev_arr, size, size); } read(dev_arr, arr, arr_size); cudaFree(dev_arr); delete[] arr; } #pragma endregion
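The .hip/.cu pair above differs almost only in how kernels are launched and how runtime calls are spelled (cudaMalloc/hipMalloc, cudaFree/hipFree, <<<...>>>/hipLaunchKernelGGL). A minimal sketch of that mapping on a trivial saxpy kernel (the kernel and sizes are illustrative; the HIP lines in comments show what a hipify-style rewrite typically emits):

// saxpy_launch.cu -- CUDA launch syntax with the usual hipify rewriting shown
// in comments (sketch only; saxpy and the sizes are placeholders).
#include <cuda_runtime.h>

__global__ void saxpy(int n, float a, const float* x, float* y) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}

int main() {
    const int n = 1 << 20;
    float *x, *y;
    cudaMalloc(&x, n * sizeof(float));            // HIP: hipMalloc(&x, ...)
    cudaMalloc(&y, n * sizeof(float));
    cudaMemset(x, 0, n * sizeof(float));          // HIP: hipMemset(x, 0, ...)
    cudaMemset(y, 0, n * sizeof(float));

    dim3 block(256), grid((n + block.x - 1) / block.x);
    saxpy<<<grid, block>>>(n, 2.0f, x, y);
    // HIP rewrite: hipLaunchKernelGGL(saxpy, grid, block, 0, 0, n, 2.0f, x, y);

    cudaDeviceSynchronize();                      // HIP: hipDeviceSynchronize()
    cudaFree(x); cudaFree(y);                     // HIP: hipFree(x); hipFree(y);
    return 0;
}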
18de35452f2499e4cac0a65af17e4496b104170f.hip
// !!! This is a file automatically generated by hipify!!! #include <THH/THHTensorMath.h> #include <THH/THHGeneral.h> #include <THH/THHTensorCopy.h> #include <THH/THHApply.cuh> #include <THH/THHNumerics.cuh> #include <THH/THHThrustAllocator.cuh> #include <THH/THHTensor.hpp> #include <thrust/copy.h> #include <thrust/count.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/transform.h> #if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ #include <thrust/system/hip/execution_policy.h> #endif #include <cfloat> template <typename T> struct TensorFillOp { TensorFillOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* v) { *v = val; } const T val; }; // copypasta from https://github.com/thrust/thrust/blob/master/examples/strided_range.cu template <typename Iterator> class strided_range { public: typedef typename thrust::iterator_difference<Iterator>::type difference_type; struct stride_functor : public thrust::unary_function<difference_type, difference_type> { difference_type stride; stride_functor(difference_type stride) : stride(stride) {} __host__ __device__ difference_type operator()(const difference_type& i) const { return stride * i; } }; typedef typename thrust::counting_iterator<difference_type> CountingIterator; typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator; typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator; // type of the strided_range iterator typedef PermutationIterator iterator; // construct strided_range for the range [first,last) strided_range(Iterator first, Iterator last, difference_type stride) : first(first), last(last), stride(stride) {} iterator begin(void) const { return PermutationIterator(first, TransformIterator(CountingIterator(0), stride_functor(stride))); } iterator end(void) const { return begin() + ((last - first) + (stride - 1)) / stride; } protected: Iterator first; Iterator last; difference_type stride; }; struct idx_functor { int64_t div; int64_t size; __host__ __device__ idx_functor(int64_t div, int64_t size) : div(div), size(size) {} __host__ __device__ int64_t operator()(int64_t val) { return (val / div) % size; } }; template <typename T> struct NonZeroOp { NonZeroOp() {} __host__ __device__ bool operator()(T lhs) const { if (THCNumerics<T>::ne(lhs, ScalarConvert<float, T>::to(0.0))) { return true; } else { return false; } } }; template <> struct NonZeroOp<bool> { NonZeroOp() {} __host__ __device__ bool operator()(bool lhs) const { return lhs != false; } }; #include <THH/generic/THHTensorMath.hip> #include <THH/THHGenerateAllTypes.h> #include <THH/generic/THHTensorMath.hip> #include <THH/THHGenerateBoolType.h> #include <THH/generic/THHTensorMath.hip> #include <THH/THHGenerateBFloat16Type.h>
18de35452f2499e4cac0a65af17e4496b104170f.cu
#include <THC/THCTensorMath.h> #include <THC/THCGeneral.h> #include <THC/THCTensorCopy.h> #include <THC/THCApply.cuh> #include <THC/THCNumerics.cuh> #include <THC/THCThrustAllocator.cuh> #include <THC/THCTensor.hpp> #include <thrust/copy.h> #include <thrust/count.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/transform.h> #if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ #include <thrust/system/cuda/execution_policy.h> #endif #include <cfloat> template <typename T> struct TensorFillOp { TensorFillOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* v) { *v = val; } const T val; }; // copypasta from https://github.com/thrust/thrust/blob/master/examples/strided_range.cu template <typename Iterator> class strided_range { public: typedef typename thrust::iterator_difference<Iterator>::type difference_type; struct stride_functor : public thrust::unary_function<difference_type, difference_type> { difference_type stride; stride_functor(difference_type stride) : stride(stride) {} __host__ __device__ difference_type operator()(const difference_type& i) const { return stride * i; } }; typedef typename thrust::counting_iterator<difference_type> CountingIterator; typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator; typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator; // type of the strided_range iterator typedef PermutationIterator iterator; // construct strided_range for the range [first,last) strided_range(Iterator first, Iterator last, difference_type stride) : first(first), last(last), stride(stride) {} iterator begin(void) const { return PermutationIterator(first, TransformIterator(CountingIterator(0), stride_functor(stride))); } iterator end(void) const { return begin() + ((last - first) + (stride - 1)) / stride; } protected: Iterator first; Iterator last; difference_type stride; }; struct idx_functor { int64_t div; int64_t size; __host__ __device__ idx_functor(int64_t div, int64_t size) : div(div), size(size) {} __host__ __device__ int64_t operator()(int64_t val) { return (val / div) % size; } }; template <typename T> struct NonZeroOp { NonZeroOp() {} __host__ __device__ bool operator()(T lhs) const { if (THCNumerics<T>::ne(lhs, ScalarConvert<float, T>::to(0.0))) { return true; } else { return false; } } }; template <> struct NonZeroOp<bool> { NonZeroOp() {} __host__ __device__ bool operator()(bool lhs) const { return lhs != false; } }; #include <THC/generic/THCTensorMath.cu> #include <THC/THCGenerateAllTypes.h> #include <THC/generic/THCTensorMath.cu> #include <THC/THCGenerateBoolType.h> #include <THC/generic/THCTensorMath.cu> #include <THC/THCGenerateBFloat16Type.h>
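The pair above is almost a pure header rename (THH/THC, hip/cuda Thrust execution policies); the main reusable piece it carries is the NonZeroOp Thrust predicate. A small standalone sketch of using such a predicate with thrust::count_if on device data (the values are made up; this is not the THC kernel code):

// count_nonzero.cu -- count nonzeros with a Thrust predicate, similar in spirit
// to the NonZeroOp functor above (sketch only).
#include <thrust/device_vector.h>
#include <thrust/count.h>
#include <cstdio>

template <typename T>
struct NonZero {
    __host__ __device__ bool operator()(T v) const { return v != T(0); }
};

int main() {
    thrust::device_vector<float> d(8, 0.0f);
    d[1] = 3.0f; d[3] = -1.0f; d[6] = 7.0f;       // three nonzero entries

    // count_if runs on the device because the iterators are device iterators
    long nnz = thrust::count_if(d.begin(), d.end(), NonZero<float>());
    printf("nonzeros: %ld\n", nnz);               // prints 3
    return 0;
}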
feb7a5d918f9ba01162b4ce1011081e567d95462.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @precisions normal z -> s d c @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Matrix is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void zsymmetrize_lower( int m, magmaDoubleComplex *dA, int ldda ) { // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = MAGMA_Z_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void zsymmetrize_upper( int m, magmaDoubleComplex *dA, int ldda ) { // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dA = MAGMA_Z_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } } } /***************************************************************************//** Purpose ------- ZSYMMETRIZE copies lower triangle to upper triangle, or vice-versa, to make dA a general representation of a symmetric matrix. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in,out] dA COMPLEX_16 array, dimension (LDDA,N) The m by m matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_symmetrize *******************************************************************************/ extern "C" void magmablas_zsymmetrize_q( magma_uplo_t uplo, magma_int_t m, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m) ) info = -4; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 ) return; dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); if ( uplo == MagmaUpper ) { hipLaunchKernelGGL(( zsymmetrize_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda ); } else { hipLaunchKernelGGL(( zsymmetrize_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda ); } }
feb7a5d918f9ba01162b4ce1011081e567d95462.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @precisions normal z -> s d c @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Matrix is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void zsymmetrize_lower( int m, magmaDoubleComplex *dA, int ldda ) { // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = MAGMA_Z_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void zsymmetrize_upper( int m, magmaDoubleComplex *dA, int ldda ) { // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dA = MAGMA_Z_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } } } /***************************************************************************//** Purpose ------- ZSYMMETRIZE copies lower triangle to upper triangle, or vice-versa, to make dA a general representation of a symmetric matrix. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in,out] dA COMPLEX_16 array, dimension (LDDA,N) The m by m matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_symmetrize *******************************************************************************/ extern "C" void magmablas_zsymmetrize_q( magma_uplo_t uplo, magma_int_t m, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m) ) info = -4; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 ) return; dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); if ( uplo == MagmaUpper ) { zsymmetrize_upper<<< grid, threads, 0, queue->cuda_stream() >>>( m, dA, ldda ); } else { zsymmetrize_lower<<< grid, threads, 0, queue->cuda_stream() >>>( m, dA, ldda ); } }
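zsymmetrize_lower/upper above assign one thread per row and walk the columns up to the diagonal. A simplified, real-valued sketch of the same idea with no MAGMA dependencies (names and sizes are illustrative, not MAGMA code):

// symmetrize_lower.cu -- mirror the lower triangle of a column-major m x m
// matrix into the upper triangle, one thread per row (sketch only).
#include <cuda_runtime.h>

#define NB 64

__global__ void symmetrize_lower(int m, float* dA, int ldda) {
    int i = blockIdx.x * NB + threadIdx.x;        // row handled by this thread
    if (i >= m) return;
    for (int j = 0; j < i; ++j)                   // columns strictly below the diagonal
        dA[j + (size_t)i * ldda] = dA[i + (size_t)j * ldda];   // upper := lower
}

int main() {
    const int m = 512, ldda = m;
    float* dA;
    cudaMalloc(&dA, (size_t)ldda * m * sizeof(float));
    cudaMemset(dA, 0, (size_t)ldda * m * sizeof(float));
    dim3 threads(NB), grid((m + NB - 1) / NB);
    symmetrize_lower<<<grid, threads>>>(m, dA, ldda);
    cudaDeviceSynchronize();
    cudaFree(dA);
    return 0;
}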
f99e6a6949d4a1caacff580a9b0e8f266b5e103d.hip
// !!! This is a file automatically generated by hipify!!! //#include <stdio.h> #include <iostream> #include<algorithm> #include <stdlib.h> #include <stdint.h> #include "support.h" #include "kernel.hip" #include<vector> #include<utility> using namespace std; bool pair_compare(const pair<short unsigned int, unsigned int>& p1,const pair<short unsigned int, unsigned int>& p2); int main(int argc, char* argv[]) { FILE *fp = fopen("topic-0.txt", "r"); if (fp == NULL){ cout<<"Can't read file"; exit(0); } cout<<"fp opened"; char *line = NULL; size_t len = 0; unsigned int lines = 0; unsigned int count = 0; char *ln, *nptr; unsigned int *transactions = NULL; unsigned int *trans_offset = NULL; unsigned int *flist = NULL; //unsigned short *flist_key_16 = NULL; unsigned short *flist_key_16_index = NULL; unsigned int element_id = 0; unsigned int item_name = 0; transactions = (unsigned int *) malloc(max_num_of_transaction * max_items_in_transaction * sizeof(unsigned int)); trans_offset = (unsigned int *) malloc((max_num_of_transaction + 1) * sizeof(unsigned int)); flist = (unsigned int *) malloc(max_unique_items * sizeof(unsigned int)); //flist_key_16 = (unsigned short*) malloc(max_unique_items * sizeof(unsigned short)); flist_key_16_index = (unsigned short*) malloc(max_unique_items * sizeof(unsigned short)); memset(flist_key_16_index, 0xFFFF, max_unique_items * sizeof(unsigned short)); trans_offset[0] = 0; while (getline(&line, &len, fp) != -1 && lines < max_num_of_transaction){ count = 0; ln = strtok(line, " "); if (ln != NULL){ //unsigned int a = (unsigned int) strtoul(ln, NULL, 0); item_name = (unsigned int) strtoul(ln, NULL, 0); transactions[element_id++] = item_name; if (item_name < max_unique_items) flist[item_name] = 0; count++; } while (ln != NULL){ ln = strtok(NULL, " "); if (ln != NULL){ item_name = (unsigned int) strtoul(ln, &nptr, 0); if (strcmp(nptr, ln) != 0){ transactions[element_id++] = item_name; if (item_name < max_unique_items) flist[item_name] = 0; count++; } } } trans_offset[lines + 1] = trans_offset[lines] + count; lines++; } cout<<"file parsed\n"; fclose(fp); //trans_offset[lines] = NULL; //transactions[element_id] = NULL; unsigned int num_items_in_transactions = element_id; unsigned int num_transactions = lines - 1; cout<<"Number of Transactions = "<<num_transactions<<endl; cout<<"num_items_in_transactions = "<<num_items_in_transactions<<endl; #if TEST_MODE for (int i = 0; i < num_transactions; i++){ int item_ends = 0; if (i == (num_transactions - 1)){ item_ends = num_items_in_transactions; }else{ item_ends = trans_offset[i+1]; } for (int j = trans_offset[i]; j < item_ends; j++) cout<<transactions[j]<<" "; cout<<endl; } for (int i = 0; i <= num_transactions; i++) { cout<<"(i,offset)"<<i<<","<<trans_offset[i]; } #endif ///////////////////////////////////////////////////////////////////////////////////// /////////////////////// Device Variables Initializations /////////////////////////// ///////////////////////////////////////////////////////////////////////////////////// Timer timer; hipError_t cuda_ret; unsigned int *d_transactions; unsigned int *d_trans_offsets; unsigned int *d_flist, *d_flist_key_16; unsigned short *d_flist_key_16_index; hipDeviceProp_t deviceProp; hipError_t ret; hipGetDeviceProperties(&deviceProp, 0); int SM_PER_BLOCK = deviceProp.sharedMemPerBlock; int CONST_MEM_GPU = deviceProp.totalConstMem; // Allocate device variables ---------------------------------------------- cout<<"Allocating device variables..."; startTime(&timer); cuda_ret = 
hipMalloc((void**)&d_transactions, num_items_in_transactions * sizeof(unsigned int)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**)&d_trans_offsets, (num_transactions + 1) * sizeof(unsigned int)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**)&d_flist, max_unique_items * sizeof(unsigned int)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**)&d_flist_key_16_index, max_unique_items * sizeof(unsigned short)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**)&d_flist_key_16, max_unique_items * sizeof(unsigned short)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); hipDeviceSynchronize(); stopTime(&timer); cout<<elapsedTime(timer)<<endl; cuda_ret = hipMemcpy(d_transactions, transactions, num_items_in_transactions * sizeof(unsigned int), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); // to test cuda_ret = hipMemcpy(d_trans_offsets, trans_offset, (num_transactions + 1) * sizeof(unsigned int), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = hipMemset(d_flist, 0, max_unique_items * sizeof(unsigned int)); if(cuda_ret != hipSuccess) FATAL("Unable to set device memory"); startTime(&timer); cout<<"histogram kernel\n"; make_flist(d_trans_offsets, d_transactions, d_flist, num_transactions, num_items_in_transactions, SM_PER_BLOCK); stopTime(&timer); cout<<elapsedTime(timer)<<endl; startTime(&timer); cout<<"copying flist form dev to host\n"; cuda_ret = hipMemcpy(flist, d_flist, max_unique_items * sizeof(unsigned int), hipMemcpyDeviceToHost); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to host"); hipDeviceSynchronize(); stopTime(&timer); cout<<elapsedTime(timer)<<endl; //now sort the flist vector<pair<unsigned short, unsigned int> > v; //map<unsigned int, short unsigned int> m; cout<<"sorting and pruning flist:\n"; startTime(&timer); for (int i =0; i < max_unique_items;i++) { if (flist[i] >= support) { v.push_back(pair<unsigned short, unsigned int>(i, flist[i])); } } // print the vector #if TEST_MODE /*cout<<"vector length:"<<v.size()<<endl; vector<pair<unsigned short , unsigned int> >::iterator it; for (it = v.begin(); it != v.end();it++) { cout<<"(key,value)"<<it->first<<","<<it->second<<endl; }*/ #endif std::sort(v.begin(),v.end(), pair_compare); hipDeviceSynchronize(); stopTime(&timer); cout<<elapsedTime(timer)<<endl; cout<<"copying sort data to flist and key arrays:\n"; startTime(&timer); vector<pair<unsigned short , unsigned int> >::reverse_iterator itr; int i = 0; for (itr = v.rbegin(); itr != v.rend();itr++) { flist[i] = itr->second; //flist_key_16[i] = itr->first; flist_key_16_index[itr->first] = i; i++; } #if TEST_MODE for (i = 0; i < max_unique_items; i++) { cout<<"(key,key_index_in_flist)"<<i<<","<<flist_key_16_index[i]<<endl; } #endif hipDeviceSynchronize(); stopTime(&timer); cout<<elapsedTime(timer)<<endl; cout<<"constant mem available:"<<CONST_MEM_GPU<<endl; cout<<"copy flist and key arrays back to device:\n"; startTime(&timer); cuda_ret = hipMemcpy(d_flist, flist, max_unique_items * sizeof(unsigned int), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); //cuda_ret = hipMemcpy(d_flist_key_16, flist_key_16, max_unique_items * sizeof(unsigned short), // hipMemcpyHostToDevice); //if(cuda_ret != 
hipSuccess) FATAL("Unable to copy memory to the device"); bool flag = false; if (max_unique_items * sizeof(unsigned short) < CONST_MEM_GPU) { // keep the index file in constant memory #if TEST_MODE cout<<"copying to constant mem"<<endl; #endif flag = true; cuda_ret = hipMemcpyToSymbol(dc_flist_key_16_index, flist_key_16_index, max_unique_items * sizeof(unsigned short), 0, hipMemcpyHostToDevice); } else { #if TEST_MODE cout<<"copying to global mem"<<endl; #endif flag = false; cuda_ret = hipMemcpy(d_flist_key_16_index, flist_key_16_index, max_unique_items * sizeof(unsigned short), hipMemcpyHostToDevice); } if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); hipDeviceSynchronize(); stopTime(&timer); cout<<elapsedTime(timer)<<endl; //now prune and sort each transaction cout<<"sort_transaction:\n"; startTime(&timer); sort_transaction(d_flist_key_16_index, d_flist, d_transactions, d_trans_offsets, num_transactions, num_items_in_transactions, max_unique_items, flag); stopTime(&timer); cout<<elapsedTime(timer)<<endl; #if TEST_MODE cuda_ret = hipMemcpy(transactions, d_transactions, num_items_in_transactions * sizeof(unsigned int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the host"); for (int i = 0; i < num_transactions; i++){ int item_ends = 0; if (i == (num_transactions - 1)){ item_ends = num_items_in_transactions; }else{ item_ends = trans_offset[i+1]; } for (int j = trans_offset[i]; j < item_ends; j++) cout<<transactions[j]<<" "; cout<<endl; } #endif // Free memory ------------------------------------------------------------ hipFree(d_trans_offsets); hipFree(d_transactions); hipFree(d_flist); hipFree(d_flist_key_16); hipFree(d_flist_key_16_index); free(trans_offset); free(transactions); free(flist); //free(flist_key_16); free(flist_key_16_index); cout<<"program end"; } bool pair_compare(const pair<short unsigned int, unsigned int>& p1,const pair<short unsigned int, unsigned int>& p2) { return p1.second < p2.second; }
f99e6a6949d4a1caacff580a9b0e8f266b5e103d.cu
//#include <stdio.h> #include <iostream> #include<algorithm> #include <stdlib.h> #include <stdint.h> #include "support.h" #include "kernel.cu" #include<vector> #include<utility> using namespace std; bool pair_compare(const pair<short unsigned int, unsigned int>& p1,const pair<short unsigned int, unsigned int>& p2); int main(int argc, char* argv[]) { FILE *fp = fopen("topic-0.txt", "r"); if (fp == NULL){ cout<<"Can't read file"; exit(0); } cout<<"fp opened"; char *line = NULL; size_t len = 0; unsigned int lines = 0; unsigned int count = 0; char *ln, *nptr; unsigned int *transactions = NULL; unsigned int *trans_offset = NULL; unsigned int *flist = NULL; //unsigned short *flist_key_16 = NULL; unsigned short *flist_key_16_index = NULL; unsigned int element_id = 0; unsigned int item_name = 0; transactions = (unsigned int *) malloc(max_num_of_transaction * max_items_in_transaction * sizeof(unsigned int)); trans_offset = (unsigned int *) malloc((max_num_of_transaction + 1) * sizeof(unsigned int)); flist = (unsigned int *) malloc(max_unique_items * sizeof(unsigned int)); //flist_key_16 = (unsigned short*) malloc(max_unique_items * sizeof(unsigned short)); flist_key_16_index = (unsigned short*) malloc(max_unique_items * sizeof(unsigned short)); memset(flist_key_16_index, 0xFFFF, max_unique_items * sizeof(unsigned short)); trans_offset[0] = 0; while (getline(&line, &len, fp) != -1 && lines < max_num_of_transaction){ count = 0; ln = strtok(line, " "); if (ln != NULL){ //unsigned int a = (unsigned int) strtoul(ln, NULL, 0); item_name = (unsigned int) strtoul(ln, NULL, 0); transactions[element_id++] = item_name; if (item_name < max_unique_items) flist[item_name] = 0; count++; } while (ln != NULL){ ln = strtok(NULL, " "); if (ln != NULL){ item_name = (unsigned int) strtoul(ln, &nptr, 0); if (strcmp(nptr, ln) != 0){ transactions[element_id++] = item_name; if (item_name < max_unique_items) flist[item_name] = 0; count++; } } } trans_offset[lines + 1] = trans_offset[lines] + count; lines++; } cout<<"file parsed\n"; fclose(fp); //trans_offset[lines] = NULL; //transactions[element_id] = NULL; unsigned int num_items_in_transactions = element_id; unsigned int num_transactions = lines - 1; cout<<"Number of Transactions = "<<num_transactions<<endl; cout<<"num_items_in_transactions = "<<num_items_in_transactions<<endl; #if TEST_MODE for (int i = 0; i < num_transactions; i++){ int item_ends = 0; if (i == (num_transactions - 1)){ item_ends = num_items_in_transactions; }else{ item_ends = trans_offset[i+1]; } for (int j = trans_offset[i]; j < item_ends; j++) cout<<transactions[j]<<" "; cout<<endl; } for (int i = 0; i <= num_transactions; i++) { cout<<"(i,offset)"<<i<<","<<trans_offset[i]; } #endif ///////////////////////////////////////////////////////////////////////////////////// /////////////////////// Device Variables Initializations /////////////////////////// ///////////////////////////////////////////////////////////////////////////////////// Timer timer; cudaError_t cuda_ret; unsigned int *d_transactions; unsigned int *d_trans_offsets; unsigned int *d_flist, *d_flist_key_16; unsigned short *d_flist_key_16_index; cudaDeviceProp deviceProp; cudaError_t ret; cudaGetDeviceProperties(&deviceProp, 0); int SM_PER_BLOCK = deviceProp.sharedMemPerBlock; int CONST_MEM_GPU = deviceProp.totalConstMem; // Allocate device variables ---------------------------------------------- cout<<"Allocating device variables..."; startTime(&timer); cuda_ret = cudaMalloc((void**)&d_transactions, num_items_in_transactions * sizeof(unsigned 
int)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**)&d_trans_offsets, (num_transactions + 1) * sizeof(unsigned int)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**)&d_flist, max_unique_items * sizeof(unsigned int)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**)&d_flist_key_16_index, max_unique_items * sizeof(unsigned short)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**)&d_flist_key_16, max_unique_items * sizeof(unsigned short)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cudaDeviceSynchronize(); stopTime(&timer); cout<<elapsedTime(timer)<<endl; cuda_ret = cudaMemcpy(d_transactions, transactions, num_items_in_transactions * sizeof(unsigned int), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); // to test cuda_ret = cudaMemcpy(d_trans_offsets, trans_offset, (num_transactions + 1) * sizeof(unsigned int), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = cudaMemset(d_flist, 0, max_unique_items * sizeof(unsigned int)); if(cuda_ret != cudaSuccess) FATAL("Unable to set device memory"); startTime(&timer); cout<<"histogram kernel\n"; make_flist(d_trans_offsets, d_transactions, d_flist, num_transactions, num_items_in_transactions, SM_PER_BLOCK); stopTime(&timer); cout<<elapsedTime(timer)<<endl; startTime(&timer); cout<<"copying flist form dev to host\n"; cuda_ret = cudaMemcpy(flist, d_flist, max_unique_items * sizeof(unsigned int), cudaMemcpyDeviceToHost); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to host"); cudaDeviceSynchronize(); stopTime(&timer); cout<<elapsedTime(timer)<<endl; //now sort the flist vector<pair<unsigned short, unsigned int> > v; //map<unsigned int, short unsigned int> m; cout<<"sorting and pruning flist:\n"; startTime(&timer); for (int i =0; i < max_unique_items;i++) { if (flist[i] >= support) { v.push_back(pair<unsigned short, unsigned int>(i, flist[i])); } } // print the vector #if TEST_MODE /*cout<<"vector length:"<<v.size()<<endl; vector<pair<unsigned short , unsigned int> >::iterator it; for (it = v.begin(); it != v.end();it++) { cout<<"(key,value)"<<it->first<<","<<it->second<<endl; }*/ #endif std::sort(v.begin(),v.end(), pair_compare); cudaDeviceSynchronize(); stopTime(&timer); cout<<elapsedTime(timer)<<endl; cout<<"copying sort data to flist and key arrays:\n"; startTime(&timer); vector<pair<unsigned short , unsigned int> >::reverse_iterator itr; int i = 0; for (itr = v.rbegin(); itr != v.rend();itr++) { flist[i] = itr->second; //flist_key_16[i] = itr->first; flist_key_16_index[itr->first] = i; i++; } #if TEST_MODE for (i = 0; i < max_unique_items; i++) { cout<<"(key,key_index_in_flist)"<<i<<","<<flist_key_16_index[i]<<endl; } #endif cudaDeviceSynchronize(); stopTime(&timer); cout<<elapsedTime(timer)<<endl; cout<<"constant mem available:"<<CONST_MEM_GPU<<endl; cout<<"copy flist and key arrays back to device:\n"; startTime(&timer); cuda_ret = cudaMemcpy(d_flist, flist, max_unique_items * sizeof(unsigned int), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); //cuda_ret = cudaMemcpy(d_flist_key_16, flist_key_16, max_unique_items * sizeof(unsigned short), // cudaMemcpyHostToDevice); //if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); 
bool flag = false; if (max_unique_items * sizeof(unsigned short) < CONST_MEM_GPU) { // keep the index file in constant memory #if TEST_MODE cout<<"copying to constant mem"<<endl; #endif flag = true; cuda_ret = cudaMemcpyToSymbol(dc_flist_key_16_index, flist_key_16_index, max_unique_items * sizeof(unsigned short), 0, cudaMemcpyHostToDevice); } else { #if TEST_MODE cout<<"copying to global mem"<<endl; #endif flag = false; cuda_ret = cudaMemcpy(d_flist_key_16_index, flist_key_16_index, max_unique_items * sizeof(unsigned short), cudaMemcpyHostToDevice); } if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cudaDeviceSynchronize(); stopTime(&timer); cout<<elapsedTime(timer)<<endl; //now prune and sort each transaction cout<<"sort_transaction:\n"; startTime(&timer); sort_transaction(d_flist_key_16_index, d_flist, d_transactions, d_trans_offsets, num_transactions, num_items_in_transactions, max_unique_items, flag); stopTime(&timer); cout<<elapsedTime(timer)<<endl; #if TEST_MODE cuda_ret = cudaMemcpy(transactions, d_transactions, num_items_in_transactions * sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the host"); for (int i = 0; i < num_transactions; i++){ int item_ends = 0; if (i == (num_transactions - 1)){ item_ends = num_items_in_transactions; }else{ item_ends = trans_offset[i+1]; } for (int j = trans_offset[i]; j < item_ends; j++) cout<<transactions[j]<<" "; cout<<endl; } #endif // Free memory ------------------------------------------------------------ cudaFree(d_trans_offsets); cudaFree(d_transactions); cudaFree(d_flist); cudaFree(d_flist_key_16); cudaFree(d_flist_key_16_index); free(trans_offset); free(transactions); free(flist); //free(flist_key_16); free(flist_key_16_index); cout<<"program end"; } bool pair_compare(const pair<short unsigned int, unsigned int>& p1,const pair<short unsigned int, unsigned int>& p2) { return p1.second < p2.second; }
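make_flist above builds an item-frequency histogram ("flist") over all transactions; its kernel lives in kernel.cu, which is not part of this pair. A generic sketch of such a histogram with global atomics, assuming item ids are bounded by max_unique_items (this is not the project's make_flist kernel):

// item_histogram.cu fragment -- frequency count of transaction items with global
// atomics (generic sketch; not the make_flist kernel from kernel.cu).
__global__ void item_histogram(const unsigned int* items, unsigned int n_items,
                               unsigned int* flist, unsigned int max_unique) {
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int stride = gridDim.x * blockDim.x;
    for (; i < n_items; i += stride) {            // grid-stride loop over all items
        unsigned int item = items[i];
        if (item < max_unique)
            atomicAdd(&flist[item], 1u);          // one count per occurrence
    }
}
// launch example: item_histogram<<<256, 256>>>(d_transactions, num_items_in_transactions,
//                                              d_flist, max_unique_items);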
3ed3e41d7bf0a63b0615044038aefc95b0733e43.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "matrix_mul.h" // Thread block size #define BLOCK_SIZE 16 // Forward declaration of the device multiplication function __global__ void Muld(float*, float*, int, int, float*); // Host multiplication function // Compute C = A * B // hA is the height of A // wA is the width of A // wB is the width of B //export void Mul(float*, float*, int, int, int, float*); void Mul(float* A, float* B, int hA, int wA, int wB, float* C) { int size; // Load A and B to the device float* Ad; size = hA * wA * sizeof(float); hipMalloc((void**)&Ad, size); hipMemcpy(Ad, A, size, hipMemcpyHostToDevice); float* Bd; size = wA * wB * sizeof(float); hipMalloc((void**)&Bd, size); hipMemcpy(Bd, B, size, hipMemcpyHostToDevice); // Allocate C on the device float* Cd; size = hA * wB * sizeof(float); hipMalloc((void**)&Cd, size); // Compute the execution configuration assuming // the matrix dimensions are multiples of BLOCK_SIZE dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(wB / dimBlock.x, hA / dimBlock.y); // Launch the device computation hipLaunchKernelGGL(( Muld), dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd, wA, wB, Cd); // Read C from the device hipMemcpy(C, Cd, size, hipMemcpyDeviceToHost); // Free device memory hipFree(Ad); hipFree(Bd); hipFree(Cd); } // Device multiplication function called by Mul() // Compute C = A * B // wA is the width of A // wB is the width of B __global__ void Muld(float* A, float* B, int wA, int wB, float* C) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = ...; // Index of the last sub-matrix of A processed by the block int aEnd = ...; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // The element of the block sub-matrix that is computed // by the thread float Csub = 0; // Loop over all the sub-matrices of A and B required to // compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Shared memory for the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Shared memory for the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from global memory to shared memory; // each thread loads one element of each matrix As[ty][tx] = A[...]; Bs[ty][tx] = B[...]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix for (int k = 0; k < BLOCK_SIZE; ++k) .... // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to global memory; // each thread writes one element ... }
3ed3e41d7bf0a63b0615044038aefc95b0733e43.cu
#include <stdio.h> #include "matrix_mul.h" // Thread block size #define BLOCK_SIZE 16 // Forward declaration of the device multiplication function __global__ void Muld(float*, float*, int, int, float*); // Host multiplication function // Compute C = A * B // hA is the height of A // wA is the width of A // wB is the width of B //export void Mul(float*, float*, int, int, int, float*); void Mul(float* A, float* B, int hA, int wA, int wB, float* C) { int size; // Load A and B to the device float* Ad; size = hA * wA * sizeof(float); cudaMalloc((void**)&Ad, size); cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice); float* Bd; size = wA * wB * sizeof(float); cudaMalloc((void**)&Bd, size); cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice); // Allocate C on the device float* Cd; size = hA * wB * sizeof(float); cudaMalloc((void**)&Cd, size); // Compute the execution configuration assuming // the matrix dimensions are multiples of BLOCK_SIZE dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(wB / dimBlock.x, hA / dimBlock.y); // Launch the device computation Muld<<<dimGrid, dimBlock>>>(Ad, Bd, wA, wB, Cd); // Read C from the device cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost); // Free device memory cudaFree(Ad); cudaFree(Bd); cudaFree(Cd); } // Device multiplication function called by Mul() // Compute C = A * B // wA is the width of A // wB is the width of B __global__ void Muld(float* A, float* B, int wA, int wB, float* C) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = ...; // Index of the last sub-matrix of A processed by the block int aEnd = ...; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // The element of the block sub-matrix that is computed // by the thread float Csub = 0; // Loop over all the sub-matrices of A and B required to // compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Shared memory for the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Shared memory for the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from global memory to shared memory; // each thread loads one element of each matrix As[ty][tx] = A[...]; Bs[ty][tx] = B[...]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix for (int k = 0; k < BLOCK_SIZE; ++k) .... // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to global memory; // each thread writes one element ... }
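The Muld kernel in this pair is deliberately left incomplete (the "..." placeholders are in the original source and are kept as-is above). For reference, one standard way to complete such a tiled multiplication, assuming both matrix dimensions are multiples of the tile size, is sketched below; it is the textbook pattern, not the original author's missing code:

// Completed tiled-multiplication sketch (generic; not the original Muld).
#define TILE 16

__global__ void MuldComplete(const float* A, const float* B, int wA, int wB, float* C) {
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;

    int aBegin = wA * TILE * by;                 // first sub-matrix of A for this block
    int aEnd   = aBegin + wA - 1;                // last sub-matrix of A in that block row
    int aStep  = TILE;
    int bBegin = TILE * bx;                      // first sub-matrix of B for this block
    int bStep  = TILE * wB;

    float Csub = 0.0f;
    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
        __shared__ float As[TILE][TILE];
        __shared__ float Bs[TILE][TILE];
        As[ty][tx] = A[a + wA * ty + tx];        // each thread loads one element
        Bs[ty][tx] = B[b + wB * ty + tx];
        __syncthreads();
        for (int k = 0; k < TILE; ++k)           // multiply the two tiles
            Csub += As[ty][k] * Bs[k][tx];
        __syncthreads();
    }
    int c = wB * TILE * by + TILE * bx;          // block's top-left element of C
    C[c + wB * ty + tx] = Csub;                  // each thread writes one element
}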
d7c72518e9db28bed131c9753dabafa053a63671.hip
// !!! This is a file automatically generated by hipify!!! /* * spGPU - Sparse matrices on GPU library. * * Copyright (C) 2010 - 2014 * Davide Barbieri - University of Rome Tor Vergata * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 3 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "stdio.h" #include "cudadebug.h" #include "cudalang.h" extern "C" { #include "core.h" #include "vector.h" } #include "debug.h" #define VALUE_TYPE hipDoubleComplex #define TYPE_SYMBOL Z #include "axy_base.cuh"
d7c72518e9db28bed131c9753dabafa053a63671.cu
/* * spGPU - Sparse matrices on GPU library. * * Copyright (C) 2010 - 2014 * Davide Barbieri - University of Rome Tor Vergata * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 3 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "stdio.h" #include "cudadebug.h" #include "cudalang.h" extern "C" { #include "core.h" #include "vector.h" } #include "debug.h" #define VALUE_TYPE cuDoubleComplex #define TYPE_SYMBOL Z #include "axy_base.cuh"
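In this pair the only substantive change is the VALUE_TYPE macro (hipDoubleComplex vs cuDoubleComplex) fed into the axy_base.cuh template. A minimal sketch of an axpy-style kernel written directly against cuDoubleComplex and the cuComplex.h helpers (sketch only; not the spGPU axy_base.cuh implementation):

// zaxpy_sketch.cu fragment -- y[i] = alpha * x[i] + y[i] on cuDoubleComplex,
// using the arithmetic helpers from cuComplex.h (generic sketch).
#include <cuComplex.h>

__global__ void zaxpy(int n, cuDoubleComplex alpha,
                      const cuDoubleComplex* x, cuDoubleComplex* y) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        y[i] = cuCadd(cuCmul(alpha, x[i]), y[i]);   // complex multiply-add
}
// launch example: zaxpy<<<(n + 255) / 256, 256>>>(n, make_cuDoubleComplex(2.0, 0.0), d_x, d_y);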
2f13e134e1334eca7784c4ff8a2edeec46169e34.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Mask_Sum_Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; hipMalloc(&A, XSIZE*YSIZE); int valCount = 1; int *scalarOut = NULL; hipMalloc(&scalarOut, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Mask_Sum_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, A,valCount,scalarOut); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Mask_Sum_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, A,valCount,scalarOut); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Mask_Sum_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, A,valCount,scalarOut); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
2f13e134e1334eca7784c4ff8a2edeec46169e34.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Mask_Sum_Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); int valCount = 1; int *scalarOut = NULL; cudaMalloc(&scalarOut, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Mask_Sum_Kernel<<<gridBlock,threadBlock>>>(A,valCount,scalarOut); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { Mask_Sum_Kernel<<<gridBlock,threadBlock>>>(A,valCount,scalarOut); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Mask_Sum_Kernel<<<gridBlock,threadBlock>>>(A,valCount,scalarOut); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
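The harness above times 1000 launches with std::chrono wrapped around a cudaDeviceSynchronize. An alternative is to time on the GPU timeline with CUDA events; a small self-contained sketch follows (the dummy kernel and sizes are placeholders):

// event_timing.cu -- timing kernel launches with CUDA events instead of
// std::chrono (sketch only; the kernel is a placeholder).
#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummy(float* a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) a[i] += 1.0f;
}

int main() {
    const int n = 1 << 20;
    float* a;
    cudaMalloc(&a, n * sizeof(float));
    cudaMemset(a, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i)
        dummy<<<(n + 255) / 256, 256>>>(a, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                   // wait for the stop event

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);       // elapsed GPU time in milliseconds
    // total ms over 1000 launches is numerically the per-launch time in microseconds
    printf("1000 launches: %f ms total, %f us per launch\n", ms, ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(a);
    return 0;
}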
78e26f187860588a334060384becd15c326c0b88.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <iostream> #include "include/hip/hip_fp16.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/sparse_matrix_mul.cuh" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" template <typename T, typename S> __global__ void CSRSparseMatrixMulKernel(const T *a_shape_addr, T *a_indptr_addr, T *a_indices_addr, S *a_values_addr, S *b_dense_addr, T *c_shape_addr, T *c_indptr_addr, T *c_indices_addr, S *c_values_addr, int row_, int col_) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < col_) { int32_t col = a_indices_addr[i]; int32_t row = -1; for (int j = 0; j < row_ - 1; j++) { if (a_indptr_addr[j] <= i && a_indptr_addr[j + 1] > i) { row = j; break; } } int32_t absIndex = row * a_shape_addr[1] + col; c_values_addr[i] = a_values_addr[i] * b_dense_addr[absIndex]; } } template <typename T, typename S> void CalSparseMatrixMul(const T *a_shape_addr, T *a_batch_pointers_addr, T *a_indptr_addr, T *a_indices_addr, S *a_values_addr, S *b_dense_addr, T *c_shape_addr, T *c_batch_pointers_addr, T *c_indptr_addr, T *c_indices_addr, S *c_values_addr, int row_, int col_, uint32_t device_id, hipStream_t cuda_stream) { hipLaunchKernelGGL(( CSRSparseMatrixMulKernel), dim3(1), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, a_shape_addr, a_indptr_addr, a_indices_addr, a_values_addr, b_dense_addr, c_shape_addr, c_indptr_addr, c_indices_addr, c_values_addr, row_, col_); return; } template CUDA_LIB_EXPORT void CalSparseMatrixMul<int, float>( const int *a_shape_addr, int *a_batch_pointers_addr, int *a_indptr_addr, int *a_indices_addr, float *a_values_addr, float *b_dense_addr, int *c_shape_addr, int *c_batch_pointers_addr, int *c_indptr_addr, int *c_indices_addr, float *c_values_addr, int row_, int col_, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalSparseMatrixMul<int, double>( const int *a_shape_addr, int *a_batch_pointers_addr, int *a_indptr_addr, int *a_indices_addr, double *a_values_addr, double *b_dense_addr, int *c_shape_addr, int *c_batch_pointers_addr, int *c_indptr_addr, int *c_indices_addr, double *c_values_addr, int row_, int col_, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalSparseMatrixMul<int64_t, float>( const int64_t *a_shape_addr, int64_t *a_batch_pointers_addr, int64_t *a_indptr_addr, int64_t *a_indices_addr, float *a_values_addr, float *b_dense_addr, int64_t *c_shape_addr, int64_t *c_batch_pointers_addr, int64_t *c_indptr_addr, int64_t *c_indices_addr, float *c_values_addr, int row_, int col_, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalSparseMatrixMul<int64_t, double>( const int64_t *a_shape_addr, int64_t *a_batch_pointers_addr, int64_t *a_indptr_addr, int64_t *a_indices_addr, double *a_values_addr, double 
*b_dense_addr, int64_t *c_shape_addr, int64_t *c_batch_pointers_addr, int64_t *c_indptr_addr, int64_t *c_indices_addr, double *c_values_addr, int row_, int col_, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalSparseMatrixMul<int64_t, int>( const int64_t *a_shape_addr, int64_t *a_batch_pointers_addr, int64_t *a_indptr_addr, int64_t *a_indices_addr, int *a_values_addr, int *b_dense_addr, int64_t *c_shape_addr, int64_t *c_batch_pointers_addr, int64_t *c_indptr_addr, int64_t *c_indices_addr, int *c_values_addr, int row_, int col_, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalSparseMatrixMul<int64_t, int64_t>( const int64_t *a_shape_addr, int64_t *a_batch_pointers_addr, int64_t *a_indptr_addr, int64_t *a_indices_addr, int64_t *a_values_addr, int64_t *b_dense_addr, int64_t *c_shape_addr, int64_t *c_batch_pointers_addr, int64_t *c_indptr_addr, int64_t *c_indices_addr, int64_t *c_values_addr, int row_, int col_, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalSparseMatrixMul<int, int>( const int *a_shape_addr, int *a_batch_pointers_addr, int *a_indptr_addr, int *a_indices_addr, int *a_values_addr, int *b_dense_addr, int *c_shape_addr, int *c_batch_pointers_addr, int *c_indptr_addr, int *c_indices_addr, int *c_values_addr, int row_, int col_, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalSparseMatrixMul<int, int64_t>( const int *a_shape_addr, int *a_batch_pointers_addr, int *a_indptr_addr, int *a_indices_addr, int64_t *a_values_addr, int64_t *b_dense_addr, int *c_shape_addr, int *c_batch_pointers_addr, int *c_indptr_addr, int *c_indices_addr, int64_t *c_values_addr, int row_, int col_, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalSparseMatrixMul<int, int16_t>( const int *a_shape_addr, int *a_batch_pointers_addr, int *a_indptr_addr, int *a_indices_addr, int16_t *a_values_addr, int16_t *b_dense_addr, int *c_shape_addr, int *c_batch_pointers_addr, int *c_indptr_addr, int *c_indices_addr, int16_t *c_values_addr, int row_, int col_, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalSparseMatrixMul<int64_t, int16_t>( const int64_t *a_shape_addr, int64_t *a_batch_pointers_addr, int64_t *a_indptr_addr, int64_t *a_indices_addr, int16_t *a_values_addr, int16_t *b_dense_addr, int64_t *c_shape_addr, int64_t *c_batch_pointers_addr, int64_t *c_indptr_addr, int64_t *c_indices_addr, int16_t *c_values_addr, int row_, int col_, uint32_t device_id, hipStream_t cuda_stream);
78e26f187860588a334060384becd15c326c0b88.cu
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include <iostream>
#include "include/cuda_fp16.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/sparse_matrix_mul.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"

template <typename T, typename S>
__global__ void CSRSparseMatrixMulKernel(const T *a_shape_addr, T *a_indptr_addr, T *a_indices_addr, S *a_values_addr,
                                         S *b_dense_addr, T *c_shape_addr, T *c_indptr_addr, T *c_indices_addr,
                                         S *c_values_addr, int row_, int col_) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < col_) {
    int32_t col = a_indices_addr[i];
    int32_t row = -1;
    for (int j = 0; j < row_ - 1; j++) {
      if (a_indptr_addr[j] <= i && a_indptr_addr[j + 1] > i) {
        row = j;
        break;
      }
    }
    int32_t absIndex = row * a_shape_addr[1] + col;
    c_values_addr[i] = a_values_addr[i] * b_dense_addr[absIndex];
  }
}

template <typename T, typename S>
void CalSparseMatrixMul(const T *a_shape_addr, T *a_batch_pointers_addr, T *a_indptr_addr, T *a_indices_addr,
                        S *a_values_addr, S *b_dense_addr, T *c_shape_addr, T *c_batch_pointers_addr, T *c_indptr_addr,
                        T *c_indices_addr, S *c_values_addr, int row_, int col_, uint32_t device_id,
                        cudaStream_t cuda_stream) {
  CSRSparseMatrixMulKernel<<<1, CUDA_THREADS(device_id), 0, cuda_stream>>>(
    a_shape_addr, a_indptr_addr, a_indices_addr, a_values_addr, b_dense_addr, c_shape_addr, c_indptr_addr,
    c_indices_addr, c_values_addr, row_, col_);
  return;
}

template CUDA_LIB_EXPORT void CalSparseMatrixMul<int, float>(
  const int *a_shape_addr, int *a_batch_pointers_addr, int *a_indptr_addr, int *a_indices_addr, float *a_values_addr,
  float *b_dense_addr, int *c_shape_addr, int *c_batch_pointers_addr, int *c_indptr_addr, int *c_indices_addr,
  float *c_values_addr, int row_, int col_, uint32_t device_id, cudaStream_t cuda_stream);

template CUDA_LIB_EXPORT void CalSparseMatrixMul<int, double>(
  const int *a_shape_addr, int *a_batch_pointers_addr, int *a_indptr_addr, int *a_indices_addr, double *a_values_addr,
  double *b_dense_addr, int *c_shape_addr, int *c_batch_pointers_addr, int *c_indptr_addr, int *c_indices_addr,
  double *c_values_addr, int row_, int col_, uint32_t device_id, cudaStream_t cuda_stream);

template CUDA_LIB_EXPORT void CalSparseMatrixMul<int64_t, float>(
  const int64_t *a_shape_addr, int64_t *a_batch_pointers_addr, int64_t *a_indptr_addr, int64_t *a_indices_addr,
  float *a_values_addr, float *b_dense_addr, int64_t *c_shape_addr, int64_t *c_batch_pointers_addr,
  int64_t *c_indptr_addr, int64_t *c_indices_addr, float *c_values_addr, int row_, int col_, uint32_t device_id,
  cudaStream_t cuda_stream);

template CUDA_LIB_EXPORT void CalSparseMatrixMul<int64_t, double>(
  const int64_t *a_shape_addr, int64_t *a_batch_pointers_addr, int64_t *a_indptr_addr, int64_t *a_indices_addr,
  double *a_values_addr, double *b_dense_addr, int64_t *c_shape_addr, int64_t *c_batch_pointers_addr,
  int64_t *c_indptr_addr, int64_t *c_indices_addr, double *c_values_addr, int row_, int col_, uint32_t device_id,
  cudaStream_t cuda_stream);

template CUDA_LIB_EXPORT void CalSparseMatrixMul<int64_t, int>(
  const int64_t *a_shape_addr, int64_t *a_batch_pointers_addr, int64_t *a_indptr_addr, int64_t *a_indices_addr,
  int *a_values_addr, int *b_dense_addr, int64_t *c_shape_addr, int64_t *c_batch_pointers_addr,
  int64_t *c_indptr_addr, int64_t *c_indices_addr, int *c_values_addr, int row_, int col_, uint32_t device_id,
  cudaStream_t cuda_stream);

template CUDA_LIB_EXPORT void CalSparseMatrixMul<int64_t, int64_t>(
  const int64_t *a_shape_addr, int64_t *a_batch_pointers_addr, int64_t *a_indptr_addr, int64_t *a_indices_addr,
  int64_t *a_values_addr, int64_t *b_dense_addr, int64_t *c_shape_addr, int64_t *c_batch_pointers_addr,
  int64_t *c_indptr_addr, int64_t *c_indices_addr, int64_t *c_values_addr, int row_, int col_, uint32_t device_id,
  cudaStream_t cuda_stream);

template CUDA_LIB_EXPORT void CalSparseMatrixMul<int, int>(
  const int *a_shape_addr, int *a_batch_pointers_addr, int *a_indptr_addr, int *a_indices_addr, int *a_values_addr,
  int *b_dense_addr, int *c_shape_addr, int *c_batch_pointers_addr, int *c_indptr_addr, int *c_indices_addr,
  int *c_values_addr, int row_, int col_, uint32_t device_id, cudaStream_t cuda_stream);

template CUDA_LIB_EXPORT void CalSparseMatrixMul<int, int64_t>(
  const int *a_shape_addr, int *a_batch_pointers_addr, int *a_indptr_addr, int *a_indices_addr,
  int64_t *a_values_addr, int64_t *b_dense_addr, int *c_shape_addr, int *c_batch_pointers_addr, int *c_indptr_addr,
  int *c_indices_addr, int64_t *c_values_addr, int row_, int col_, uint32_t device_id, cudaStream_t cuda_stream);

template CUDA_LIB_EXPORT void CalSparseMatrixMul<int, int16_t>(
  const int *a_shape_addr, int *a_batch_pointers_addr, int *a_indptr_addr, int *a_indices_addr,
  int16_t *a_values_addr, int16_t *b_dense_addr, int *c_shape_addr, int *c_batch_pointers_addr, int *c_indptr_addr,
  int *c_indices_addr, int16_t *c_values_addr, int row_, int col_, uint32_t device_id, cudaStream_t cuda_stream);

template CUDA_LIB_EXPORT void CalSparseMatrixMul<int64_t, int16_t>(
  const int64_t *a_shape_addr, int64_t *a_batch_pointers_addr, int64_t *a_indptr_addr, int64_t *a_indices_addr,
  int16_t *a_values_addr, int16_t *b_dense_addr, int64_t *c_shape_addr, int64_t *c_batch_pointers_addr,
  int64_t *c_indptr_addr, int64_t *c_indices_addr, int16_t *c_values_addr, int row_, int col_, uint32_t device_id,
  cudaStream_t cuda_stream);
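The kernel above maps each stored CSR value to its (row, column) position by scanning the row-pointer array and then scales it by the matching entry of the dense operand; `row_` appears to be the length of the row-pointer array and `col_` the number of stored values. A minimal host-side reference (an illustration only, not part of the library) that mirrors that indexing:

#include <cstdio>
#include <vector>

// Reference of the element-wise CSR * dense multiply; "nnz" corresponds to col_
// above and indptr.size() to row_. Names are illustrative assumptions.
void CsrMulDenseReference(const std::vector<int> &indptr, const std::vector<int> &indices,
                          const std::vector<float> &values, const std::vector<float> &dense,
                          int num_cols, std::vector<float> *out) {
  const int rows_plus_1 = static_cast<int>(indptr.size());
  const int nnz = static_cast<int>(values.size());
  out->resize(nnz);
  for (int i = 0; i < nnz; ++i) {
    int col = indices[i];
    int row = -1;
    for (int j = 0; j < rows_plus_1 - 1; ++j) {
      if (indptr[j] <= i && indptr[j + 1] > i) {  // value i belongs to row j
        row = j;
        break;
      }
    }
    (*out)[i] = values[i] * dense[row * num_cols + col];
  }
}

int main() {
  // 2 x 3 CSR matrix [[1, 0, 2], [0, 3, 0]] multiplied element-wise by a dense 2 x 3 matrix.
  std::vector<int> indptr = {0, 2, 3};
  std::vector<int> indices = {0, 2, 1};
  std::vector<float> values = {1.f, 2.f, 3.f};
  std::vector<float> dense = {10.f, 20.f, 30.f, 40.f, 50.f, 60.f};
  std::vector<float> out;
  CsrMulDenseReference(indptr, indices, values, dense, 3, &out);
  for (float v : out) printf("%f\n", v);  // expected: 10, 60, 150
  return 0;
}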
7fd36a49415e653af6cafb318f1690f04ac53217.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__device__ void sumByReduction(volatile double* sdata, double mySum, const unsigned int tid)
{
    sdata[tid] = mySum;
    __syncthreads();

    // do reduction in shared mem
    if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; }
    __syncthreads();
    if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; }
    __syncthreads();

    if (tid < 32)
    {
        sdata[tid] = mySum = mySum + sdata[tid + 32];
        sdata[tid] = mySum = mySum + sdata[tid + 16];
        sdata[tid] = mySum = mySum + sdata[tid + 8];
        sdata[tid] = mySum = mySum + sdata[tid + 4];
        sdata[tid] = mySum = mySum + sdata[tid + 2];
        sdata[tid] = mySum = mySum + sdata[tid + 1];
    }
    __syncthreads();
}

__global__ void computePdKernel(double* particle_pd, int particles_per_feature, int n_features, double* feature_pd)
{
    __shared__ double shmem[256];

    for (int n = blockIdx.x; n < n_features; n += gridDim.x)
    {
        int offset = n * particles_per_feature;
        double val = 0;
        for (int i = offset + threadIdx.x; i < offset + particles_per_feature; i += blockDim.x)
        {
            val += particle_pd[i];
        }
        sumByReduction(shmem, val, threadIdx.x);
        if (threadIdx.x == 0)
            feature_pd[n] = shmem[0] / particles_per_feature;
        __syncthreads();
    }
}
7fd36a49415e653af6cafb318f1690f04ac53217.cu
#include "includes.h" __device__ void sumByReduction( volatile double* sdata, double mySum, const unsigned int tid ) { sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); if (tid < 32) { sdata[tid] = mySum = mySum + sdata[tid + 32]; sdata[tid] = mySum = mySum + sdata[tid + 16]; sdata[tid] = mySum = mySum + sdata[tid + 8]; sdata[tid] = mySum = mySum + sdata[tid + 4]; sdata[tid] = mySum = mySum + sdata[tid + 2]; sdata[tid] = mySum = mySum + sdata[tid + 1]; } __syncthreads() ; } __global__ void computePdKernel(double* particle_pd, int particles_per_feature, int n_features, double* feature_pd) { __shared__ double shmem[256] ; for ( int n = blockIdx.x ; n < n_features ;n+= gridDim.x ){ int offset = n*particles_per_feature ; double val = 0 ; for ( int i = offset+threadIdx.x ; i < offset + particles_per_feature ; i+= blockDim.x ){ val += particle_pd[i] ; } sumByReduction(shmem,val,threadIdx.x); if ( threadIdx.x == 0) feature_pd[n] = shmem[0]/particles_per_feature ; __syncthreads() ; } }
3491c2846f2aa38e46dc339eeb5764ada03e55dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void gpu_Filter_peaks_kernel(unsigned int *d_new_peak_list_DM, unsigned int *d_new_peak_list_TS, unsigned int *d_new_peak_list_BW, float *d_new_peak_list_SNR, unsigned int *d_peak_list_DM, unsigned int *d_peak_list_TS, unsigned int *d_peak_list_BW, float *d_peak_list_SNR, unsigned int nElements, unsigned int max_distance, int nLoops, int max_list_pos, int *gmem_pos){ // PPF_DPB = 128 //this is because I set nThreads to 64 // PPF_PEAKS_PER_BLOCK = something small like 10 __shared__ float s_data_snr[PPF_DPB]; __shared__ int s_data_dm[PPF_DPB]; __shared__ int s_data_ts[PPF_DPB]; __shared__ int s_flag[PPF_NTHREADS]; int d, s; int elements_pos, pos; float snr, distance, fs, fd; // float4 f4temp; if(threadIdx.x<PPF_PEAKS_PER_BLOCK){ s_flag[threadIdx.x] = 1; } else{ s_flag[threadIdx.x] = 0; } for(int f=0; f<nLoops; f++){ // Load new data blob //s_data[threadIdx.x + 2*PPF_DPB] = 0; // SNR //s_data[threadIdx.x + 64 + 2*PPF_DPB] = 0; // SNR pos = PPF_DPB*f + threadIdx.x; if(pos < nElements){ // f4temp = __ldg(&d_peak_list[pos]); s_data_dm[threadIdx.x] = d_peak_list_DM[pos]; //f4temp.x; // DM s_data_ts[threadIdx.x] = d_peak_list_TS[pos]; //f4temp.y; // Time s_data_snr[threadIdx.x] = d_peak_list_SNR[pos]; //f4temp.z; // SNR } else { s_data_dm[threadIdx.x] = 0; //f4temp.x; // DM s_data_ts[threadIdx.x] = 0; //f4temp.y; // Time s_data_snr[threadIdx.x] = -1000; //f4temp.z; // SNR } // if(blockIdx.x==0 && threadIdx.x==0) printf("point: [%d;%d;%lf]\n", s_data_dm[threadIdx.x], s_data_ts[threadIdx.x], s_data_snr[threadIdx.x]); pos = PPF_DPB*f + threadIdx.x + PPF_NTHREADS; if(pos < nElements){ // f4temp = __ldg(&d_peak_list[PPF_DPB*f + threadIdx.x + (PPF_DPB>>1)]); s_data_dm[threadIdx.x + PPF_NTHREADS ] = d_peak_list_DM[pos]; //f4temp.x; // DM s_data_ts[threadIdx.x + PPF_NTHREADS ] = d_peak_list_TS[pos]; //f4temp.y; // Time s_data_snr[threadIdx.x + PPF_NTHREADS] = d_peak_list_SNR[pos]; //f4temp.z; // SNR } else { s_data_dm[threadIdx.x + PPF_NTHREADS] = 0; //f4temp.x; // DM s_data_ts[threadIdx.x + PPF_NTHREADS] = 0; //f4temp.y; // Time s_data_snr[threadIdx.x + PPF_NTHREADS] = -1000; //f4temp.z; // SNR } __syncthreads(); elements_pos = blockIdx.x*PPF_PEAKS_PER_BLOCK; for(int p=0; p<PPF_PEAKS_PER_BLOCK; p++){ // if (blockIdx.x == 0) printf("%d %d\n", p, s_flag[p]); if((s_flag[p]) && ((elements_pos + p) < nElements)){ //pos = elements_pos+p; //if(pos<nElements){ d = d_peak_list_DM[elements_pos+p]; // DM s = d_peak_list_TS[elements_pos+p]; // Time snr = d_peak_list_SNR[elements_pos+p]; // SNR // first element // if(blockIdx.x==0) printf("s_data: %lf, snr: %lf, pos: %d\n", s_data_snr[threadIdx.x], snr, p); if( (s_data_snr[threadIdx.x] >= snr)){ fs = ((float)s_data_dm[threadIdx.x] - (float)d); fd = ((float)s_data_ts[threadIdx.x] - (float)s); distance = (fd*fd + fs*fs); // if(blockIdx.x==0) printf("%d - %d = %d; %d - %d = %d\n",s_data_dm[threadIdx.x], d, fs, s_data_ts[threadIdx.x], s, fd, distance); if( (distance < (float)max_distance) && (distance!=0) ){ // if(blockIdx.x==0) printf("distance: %d %lf %lf %lf %d %d;\n", p, distance, fs, fd, s, d); s_flag[p]=0; } } //second element if(s_data_snr[threadIdx.x + PPF_NTHREADS] >= snr){ fs = ((float)s_data_dm[threadIdx.x + PPF_NTHREADS] - (float)d); fd = ((float)s_data_ts[threadIdx.x + PPF_NTHREADS] - (float)s); distance = (fd*fd + fs*fs); // if(blockIdx.x==0) printf("%d - %d = %d; %d - %d = %d\n",s_data_dm[threadIdx.x], d, fs, s_data_ts[threadIdx.x], s, 
fd, distance); if( (distance < (float)max_distance) && (distance!=0)){ s_flag[p]=0; // if(blockIdx.x==0) printf("xdistance: %d %lf %lf %lf %d %d;\n", p, distance, fs, fd, s, d); } } //} } } // for p } // Saving peaks that got through elements_pos = blockIdx.x*PPF_PEAKS_PER_BLOCK; if(threadIdx.x < PPF_PEAKS_PER_BLOCK){ if( (s_flag[threadIdx.x] == 1) && ((elements_pos + threadIdx.x) < nElements)){ int list_pos=atomicAdd(gmem_pos, 1); if(list_pos<max_list_pos){ d_new_peak_list_DM[list_pos] = d_peak_list_DM[elements_pos + threadIdx.x]; d_new_peak_list_TS[list_pos] = d_peak_list_TS[elements_pos + threadIdx.x]; d_new_peak_list_BW[list_pos] = d_peak_list_BW[elements_pos + threadIdx.x]; d_new_peak_list_SNR[list_pos] = d_peak_list_SNR[elements_pos + threadIdx.x]; } } } }
3491c2846f2aa38e46dc339eeb5764ada03e55dc.cu
#include "includes.h" __global__ void gpu_Filter_peaks_kernel(unsigned int *d_new_peak_list_DM, unsigned int *d_new_peak_list_TS, unsigned int *d_new_peak_list_BW, float *d_new_peak_list_SNR, unsigned int *d_peak_list_DM, unsigned int *d_peak_list_TS, unsigned int *d_peak_list_BW, float *d_peak_list_SNR, unsigned int nElements, unsigned int max_distance, int nLoops, int max_list_pos, int *gmem_pos){ // PPF_DPB = 128 //this is because I set nThreads to 64 // PPF_PEAKS_PER_BLOCK = something small like 10 __shared__ float s_data_snr[PPF_DPB]; __shared__ int s_data_dm[PPF_DPB]; __shared__ int s_data_ts[PPF_DPB]; __shared__ int s_flag[PPF_NTHREADS]; int d, s; int elements_pos, pos; float snr, distance, fs, fd; // float4 f4temp; if(threadIdx.x<PPF_PEAKS_PER_BLOCK){ s_flag[threadIdx.x] = 1; } else{ s_flag[threadIdx.x] = 0; } for(int f=0; f<nLoops; f++){ // Load new data blob //s_data[threadIdx.x + 2*PPF_DPB] = 0; // SNR //s_data[threadIdx.x + 64 + 2*PPF_DPB] = 0; // SNR pos = PPF_DPB*f + threadIdx.x; if(pos < nElements){ // f4temp = __ldg(&d_peak_list[pos]); s_data_dm[threadIdx.x] = d_peak_list_DM[pos]; //f4temp.x; // DM s_data_ts[threadIdx.x] = d_peak_list_TS[pos]; //f4temp.y; // Time s_data_snr[threadIdx.x] = d_peak_list_SNR[pos]; //f4temp.z; // SNR } else { s_data_dm[threadIdx.x] = 0; //f4temp.x; // DM s_data_ts[threadIdx.x] = 0; //f4temp.y; // Time s_data_snr[threadIdx.x] = -1000; //f4temp.z; // SNR } // if(blockIdx.x==0 && threadIdx.x==0) printf("point: [%d;%d;%lf]\n", s_data_dm[threadIdx.x], s_data_ts[threadIdx.x], s_data_snr[threadIdx.x]); pos = PPF_DPB*f + threadIdx.x + PPF_NTHREADS; if(pos < nElements){ // f4temp = __ldg(&d_peak_list[PPF_DPB*f + threadIdx.x + (PPF_DPB>>1)]); s_data_dm[threadIdx.x + PPF_NTHREADS ] = d_peak_list_DM[pos]; //f4temp.x; // DM s_data_ts[threadIdx.x + PPF_NTHREADS ] = d_peak_list_TS[pos]; //f4temp.y; // Time s_data_snr[threadIdx.x + PPF_NTHREADS] = d_peak_list_SNR[pos]; //f4temp.z; // SNR } else { s_data_dm[threadIdx.x + PPF_NTHREADS] = 0; //f4temp.x; // DM s_data_ts[threadIdx.x + PPF_NTHREADS] = 0; //f4temp.y; // Time s_data_snr[threadIdx.x + PPF_NTHREADS] = -1000; //f4temp.z; // SNR } __syncthreads(); elements_pos = blockIdx.x*PPF_PEAKS_PER_BLOCK; for(int p=0; p<PPF_PEAKS_PER_BLOCK; p++){ // if (blockIdx.x == 0) printf("%d %d\n", p, s_flag[p]); if((s_flag[p]) && ((elements_pos + p) < nElements)){ //pos = elements_pos+p; //if(pos<nElements){ d = d_peak_list_DM[elements_pos+p]; // DM s = d_peak_list_TS[elements_pos+p]; // Time snr = d_peak_list_SNR[elements_pos+p]; // SNR // first element // if(blockIdx.x==0) printf("s_data: %lf, snr: %lf, pos: %d\n", s_data_snr[threadIdx.x], snr, p); if( (s_data_snr[threadIdx.x] >= snr)){ fs = ((float)s_data_dm[threadIdx.x] - (float)d); fd = ((float)s_data_ts[threadIdx.x] - (float)s); distance = (fd*fd + fs*fs); // if(blockIdx.x==0) printf("%d - %d = %d; %d - %d = %d\n",s_data_dm[threadIdx.x], d, fs, s_data_ts[threadIdx.x], s, fd, distance); if( (distance < (float)max_distance) && (distance!=0) ){ // if(blockIdx.x==0) printf("distance: %d %lf %lf %lf %d %d;\n", p, distance, fs, fd, s, d); s_flag[p]=0; } } //second element if(s_data_snr[threadIdx.x + PPF_NTHREADS] >= snr){ fs = ((float)s_data_dm[threadIdx.x + PPF_NTHREADS] - (float)d); fd = ((float)s_data_ts[threadIdx.x + PPF_NTHREADS] - (float)s); distance = (fd*fd + fs*fs); // if(blockIdx.x==0) printf("%d - %d = %d; %d - %d = %d\n",s_data_dm[threadIdx.x], d, fs, s_data_ts[threadIdx.x], s, fd, distance); if( (distance < (float)max_distance) && (distance!=0)){ s_flag[p]=0; // 
if(blockIdx.x==0) printf("xdistance: %d %lf %lf %lf %d %d;\n", p, distance, fs, fd, s, d); } } //} } } // for p } // Saving peaks that got through elements_pos = blockIdx.x*PPF_PEAKS_PER_BLOCK; if(threadIdx.x < PPF_PEAKS_PER_BLOCK){ if( (s_flag[threadIdx.x] == 1) && ((elements_pos + threadIdx.x) < nElements)){ int list_pos=atomicAdd(gmem_pos, 1); if(list_pos<max_list_pos){ d_new_peak_list_DM[list_pos] = d_peak_list_DM[elements_pos + threadIdx.x]; d_new_peak_list_TS[list_pos] = d_peak_list_TS[elements_pos + threadIdx.x]; d_new_peak_list_BW[list_pos] = d_peak_list_BW[elements_pos + threadIdx.x]; d_new_peak_list_SNR[list_pos] = d_peak_list_SNR[elements_pos + threadIdx.x]; } } } }
ac1ca1037ba435cc309c2a2184300e218b0d0f9b.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Context.h> #include <ATen/hip/HIPContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/PinnedMemoryAllocator.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/HIPSolver.h> #include <ATen/hip/HIPBlas.h> #include <ATen/hip/HIPEvent.h> #include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/hip/MiscUtils.h> #include <ATen/native/hip/BatchLinearAlgebraLib.h> #ifdef USE_CUSOLVER namespace at { namespace native { inline static Tensor column_major_identity_matrix_like(const Tensor& self) { auto size = self.sizes(); auto size_slice = IntArrayRef(size.data(), size.size()-1); return at::ones(size_slice, self.options()).diag_embed().transpose(-2, -1); } template <typename scalar_t> inline static void _apply_single_inverse_helper(scalar_t* self_ptr, scalar_t* self_inv_ptr, int* ipiv_ptr, int* info_ptr, int n) { // self_inv_ptr should already be an identity matrix auto handle = at::cuda::getCurrentCUDASolverDnHandle(); at::cuda::solver::getrf<scalar_t>(handle, n, n, self_ptr, n, ipiv_ptr, info_ptr); at::cuda::solver::getrs<scalar_t>(handle, n, n, self_ptr, n, ipiv_ptr, self_inv_ptr, n, info_ptr + 1); } template <typename scalar_t> static void apply_batched_inverse_lib(Tensor& self, Tensor& self_inv, Tensor& infos) { const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); const int n = cuda_int_cast(self.size(-2), "self.size(-2)"); auto self_data = self.data_ptr<scalar_t>(); auto self_mat_stride = matrixStride(self); auto self_inv_data = self_inv.data_ptr<scalar_t>(); auto self_inv_mat_stride = matrixStride(self_inv); auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get(); if (use_loop_launch(batch_size, n)) { int* p_infos = infos.data_ptr<int>(); auto main_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); at::cuda::CUDAEvent main_event; main_event.record(main_stream); for (int64_t i = 0; i < batch_size; i++) { auto stream = at::hip::getStreamFromPoolMasqueradingAsCUDA(); at::hip::HIPStreamGuardMasqueradingAsCUDA guard(stream); main_event.block(stream); auto dataPtr = allocator.allocate(sizeof(int) * n); int* pivot = reinterpret_cast<int*>(dataPtr.get()); _apply_single_inverse_helper<scalar_t>( &self_data[i * self_mat_stride], &self_inv_data[i * self_inv_mat_stride], pivot, p_infos + i * 2, n); at::cuda::CUDAEvent finished; finished.record(stream); finished.block(main_stream); } } else { // cublas batched kernels require input be "device array of device pointers" Tensor self_array = at::arange( reinterpret_cast<long>(self_data), reinterpret_cast<long>(&self_data[(batch_size-1) * self_mat_stride]) + 1, static_cast<long>(self_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); Tensor self_inv_array = at::arange( reinterpret_cast<long>(self_inv_data), reinterpret_cast<long>(&self_inv_data[(batch_size-1) * self_inv_mat_stride]) + 1, static_cast<long>(self_inv_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); auto dataPtr = allocator.allocate(sizeof(int)*batch_size*n); int* ipiv_array = reinterpret_cast<int*>(dataPtr.get()); Tensor _info1 = at::zeros({batch_size}, self.options().dtype(at::kInt)); Tensor _info2 = at::zeros({batch_size}, self.options().dtype(at::kInt)); at::cuda::blas::getrfBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), n, ipiv_array, _info1.data_ptr<int>(), batch_size); 
at::cuda::blas::getriBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), n, ipiv_array, _info2.data_ptr<int>(), batch_size, reinterpret_cast<scalar_t**>(self_inv_array.data_ptr())); infos = at::stack({_info1, _info2}, 1); } } template <typename scalar_t> static void apply_single_inverse_lib(const Tensor& self, Tensor& self_inv, Tensor& info) { int n = cuda_int_cast(self.size(-2), "self.size(-2)"); Tensor ipiv = at::empty({n}, self.options().dtype(at::kInt)); _apply_single_inverse_helper<scalar_t>( self.data_ptr<scalar_t>(), self_inv.data_ptr<scalar_t>(), ipiv.data_ptr<int>(), info.data_ptr<int>(), n); } Tensor _inverse_helper_cuda_lib(const Tensor& self) { Tensor self_working_copy = cloneBatchedColumnMajor(self); Tensor self_inv_working_copy = column_major_identity_matrix_like(self_working_copy); const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); if (self.dim() > 2 && batch_size > 1) { Tensor infos = at::zeros({batchCount(self) * 2}, self.options().dtype(kInt)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_batched_inverse_lib<scalar_t>( self_working_copy, self_inv_working_copy, infos); }); batchCheckErrors(infos, "inverse_cuda", false, 2); } else { Tensor info = at::zeros({2}, self.options().dtype(at::kInt)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_single_inverse_lib<scalar_t>(self_working_copy, self_inv_working_copy, info); }); batchCheckErrors(info, "inverse_cuda", false, 2); } return self_inv_working_copy; } }} // namespace at::native #endif // USE_CUSOLVER
ac1ca1037ba435cc309c2a2184300e218b0d0f9b.cu
#include <ATen/Context.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/PinnedMemoryAllocator.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/CUDASolver.h> #include <ATen/cuda/CUDABlas.h> #include <ATen/cuda/CUDAEvent.h> #include <c10/cuda/CUDAStream.h> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/cuda/MiscUtils.h> #include <ATen/native/cuda/BatchLinearAlgebraLib.h> #ifdef USE_CUSOLVER namespace at { namespace native { inline static Tensor column_major_identity_matrix_like(const Tensor& self) { auto size = self.sizes(); auto size_slice = IntArrayRef(size.data(), size.size()-1); return at::ones(size_slice, self.options()).diag_embed().transpose(-2, -1); } template <typename scalar_t> inline static void _apply_single_inverse_helper(scalar_t* self_ptr, scalar_t* self_inv_ptr, int* ipiv_ptr, int* info_ptr, int n) { // self_inv_ptr should already be an identity matrix auto handle = at::cuda::getCurrentCUDASolverDnHandle(); at::cuda::solver::getrf<scalar_t>(handle, n, n, self_ptr, n, ipiv_ptr, info_ptr); at::cuda::solver::getrs<scalar_t>(handle, n, n, self_ptr, n, ipiv_ptr, self_inv_ptr, n, info_ptr + 1); } template <typename scalar_t> static void apply_batched_inverse_lib(Tensor& self, Tensor& self_inv, Tensor& infos) { const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); const int n = cuda_int_cast(self.size(-2), "self.size(-2)"); auto self_data = self.data_ptr<scalar_t>(); auto self_mat_stride = matrixStride(self); auto self_inv_data = self_inv.data_ptr<scalar_t>(); auto self_inv_mat_stride = matrixStride(self_inv); auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); if (use_loop_launch(batch_size, n)) { int* p_infos = infos.data_ptr<int>(); auto main_stream = at::cuda::getCurrentCUDAStream(); at::cuda::CUDAEvent main_event; main_event.record(main_stream); for (int64_t i = 0; i < batch_size; i++) { auto stream = at::cuda::getStreamFromPool(); at::cuda::CUDAStreamGuard guard(stream); main_event.block(stream); auto dataPtr = allocator.allocate(sizeof(int) * n); int* pivot = reinterpret_cast<int*>(dataPtr.get()); _apply_single_inverse_helper<scalar_t>( &self_data[i * self_mat_stride], &self_inv_data[i * self_inv_mat_stride], pivot, p_infos + i * 2, n); at::cuda::CUDAEvent finished; finished.record(stream); finished.block(main_stream); } } else { // cublas batched kernels require input be "device array of device pointers" Tensor self_array = at::arange( reinterpret_cast<long>(self_data), reinterpret_cast<long>(&self_data[(batch_size-1) * self_mat_stride]) + 1, static_cast<long>(self_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); Tensor self_inv_array = at::arange( reinterpret_cast<long>(self_inv_data), reinterpret_cast<long>(&self_inv_data[(batch_size-1) * self_inv_mat_stride]) + 1, static_cast<long>(self_inv_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); auto dataPtr = allocator.allocate(sizeof(int)*batch_size*n); int* ipiv_array = reinterpret_cast<int*>(dataPtr.get()); Tensor _info1 = at::zeros({batch_size}, self.options().dtype(at::kInt)); Tensor _info2 = at::zeros({batch_size}, self.options().dtype(at::kInt)); at::cuda::blas::getrfBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), n, ipiv_array, _info1.data_ptr<int>(), batch_size); at::cuda::blas::getriBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), n, ipiv_array, 
_info2.data_ptr<int>(), batch_size, reinterpret_cast<scalar_t**>(self_inv_array.data_ptr())); infos = at::stack({_info1, _info2}, 1); } } template <typename scalar_t> static void apply_single_inverse_lib(const Tensor& self, Tensor& self_inv, Tensor& info) { int n = cuda_int_cast(self.size(-2), "self.size(-2)"); Tensor ipiv = at::empty({n}, self.options().dtype(at::kInt)); _apply_single_inverse_helper<scalar_t>( self.data_ptr<scalar_t>(), self_inv.data_ptr<scalar_t>(), ipiv.data_ptr<int>(), info.data_ptr<int>(), n); } Tensor _inverse_helper_cuda_lib(const Tensor& self) { Tensor self_working_copy = cloneBatchedColumnMajor(self); Tensor self_inv_working_copy = column_major_identity_matrix_like(self_working_copy); const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); if (self.dim() > 2 && batch_size > 1) { Tensor infos = at::zeros({batchCount(self) * 2}, self.options().dtype(kInt)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_batched_inverse_lib<scalar_t>( self_working_copy, self_inv_working_copy, infos); }); batchCheckErrors(infos, "inverse_cuda", false, 2); } else { Tensor info = at::zeros({2}, self.options().dtype(at::kInt)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_single_inverse_lib<scalar_t>(self_working_copy, self_inv_working_copy, info); }); batchCheckErrors(info, "inverse_cuda", false, 2); } return self_inv_working_copy; } }} // namespace at::native #endif // USE_CUSOLVER
17dbe0dc7e66c1e2b149af3ca324e436891f04c0.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>

#include "../c_code/helper_cuda.h"
#include "static.h"

Static * to_gpu_static(Static *cpu, int num)
{
    Static * gpu = static_cast<Static*>(malloc(sizeof(Static)*1));

    checkCudaErrors(hipMalloc((void**)&(gpu->p_dst), sizeof(int)*num));
    checkCudaErrors(hipMemset(gpu->p_dst, 0, sizeof(int)*num));
    checkCudaErrors(hipMemcpy(gpu->p_dst, cpu->p_dst, sizeof(int)*num, hipMemcpyHostToDevice));

    checkCudaErrors(hipMalloc((void**)&(gpu->p_weight), sizeof(double)*num));
    checkCudaErrors(hipMemset(gpu->p_weight, 0, sizeof(double)*num));
    checkCudaErrors(hipMemcpy(gpu->p_weight, cpu->p_weight, sizeof(double)*num, hipMemcpyHostToDevice));

    Static * ret = NULL;
    checkCudaErrors(hipMalloc((void**)&(ret), sizeof(Static)*1));
    checkCudaErrors(hipMemset(ret, 0, sizeof(Static)*1));
    checkCudaErrors(hipMemcpy(ret, gpu, sizeof(Static)*1, hipMemcpyHostToDevice));

    return ret;
}

Static * from_gpu_static(Static *gpu, int num)
{
    Static * cpu = static_cast<Static*>(malloc(sizeof(Static)*1));
    checkCudaErrors(hipMemcpy(cpu, gpu, sizeof(Static)*1, hipMemcpyDeviceToHost));
    return cpu;
}
17dbe0dc7e66c1e2b149af3ca324e436891f04c0.cu
#include <stdlib.h>

#include "../c_code/helper_cuda.h"
#include "static.h"

Static * to_gpu_static(Static *cpu, int num)
{
    Static * gpu = static_cast<Static*>(malloc(sizeof(Static)*1));

    checkCudaErrors(cudaMalloc((void**)&(gpu->p_dst), sizeof(int)*num));
    checkCudaErrors(cudaMemset(gpu->p_dst, 0, sizeof(int)*num));
    checkCudaErrors(cudaMemcpy(gpu->p_dst, cpu->p_dst, sizeof(int)*num, cudaMemcpyHostToDevice));

    checkCudaErrors(cudaMalloc((void**)&(gpu->p_weight), sizeof(double)*num));
    checkCudaErrors(cudaMemset(gpu->p_weight, 0, sizeof(double)*num));
    checkCudaErrors(cudaMemcpy(gpu->p_weight, cpu->p_weight, sizeof(double)*num, cudaMemcpyHostToDevice));

    Static * ret = NULL;
    checkCudaErrors(cudaMalloc((void**)&(ret), sizeof(Static)*1));
    checkCudaErrors(cudaMemset(ret, 0, sizeof(Static)*1));
    checkCudaErrors(cudaMemcpy(ret, gpu, sizeof(Static)*1, cudaMemcpyHostToDevice));

    return ret;
}

Static * from_gpu_static(Static *gpu, int num)
{
    Static * cpu = static_cast<Static*>(malloc(sizeof(Static)*1));
    checkCudaErrors(cudaMemcpy(cpu, gpu, sizeof(Static)*1, cudaMemcpyDeviceToHost));
    return cpu;
}
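A small host-side usage sketch for the two helpers above, assuming (from the fields accessed here) that static.h declares something like `struct Static { int *p_dst; double *p_weight; };` plus the two function prototypes. Note that from_gpu_static copies only the struct itself back, so the p_dst/p_weight members of the returned copy still point at device memory, and the staging host struct allocated inside to_gpu_static is never freed:

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "static.h"  // assumed to provide Static and the helper declarations

int main(void)
{
    const int num = 4;

    // Build a host-side Static with small example arrays.
    Static cpu;
    cpu.p_dst = (int *) malloc(sizeof(int) * num);
    cpu.p_weight = (double *) malloc(sizeof(double) * num);
    for (int i = 0; i < num; ++i) {
        cpu.p_dst[i] = i;
        cpu.p_weight[i] = 0.25 * i;
    }

    // Device copy: 'gpu' points to a device-resident Static whose members are device arrays.
    Static *gpu = to_gpu_static(&cpu, num);

    // Host copy of the struct only; shadow->p_dst / shadow->p_weight are still device
    // pointers, so the array contents must be fetched with explicit cudaMemcpy calls.
    Static *shadow = from_gpu_static(gpu, num);
    int dst_back[num];
    cudaMemcpy(dst_back, shadow->p_dst, sizeof(int) * num, cudaMemcpyDeviceToHost);
    for (int i = 0; i < num; ++i) printf("%d ", dst_back[i]);
    printf("\n");

    // Device allocations are leaked here for brevity.
    free(cpu.p_dst);
    free(cpu.p_weight);
    free(shadow);
    return 0;
}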
2169e73adfa5e8592ecf21ec9a92d52a31fac58c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Author: Ashwin Nanjappa Filename: GDelKernels.cu Copyright (c) 2013, School of Computing, National University of Singapore. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the National University of Singapore nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission from the National University of Singapore. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ //////////////////////////////////////////////////////////////////////////////// // Star Device Code //////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////// Headers // #include "GDelInternal.h" #include "GDelKernels.h" #include "Geometry.h" ///////////////////////////////////////////////////////////////////// Kernels // __forceinline__ __device__ void grabPair ( KerInsertionData worksetData, int aVal, int bVal, int& curPairIdx ) { CudaAssert( aVal != bVal ); if ( aVal == Marker ) return; worksetData._vertStarArr[ curPairIdx ] = aVal; worksetData._vertArr [ curPairIdx ] = bVal; worksetData._vertStarArr[ curPairIdx + 1 ] = bVal; worksetData._vertArr [ curPairIdx + 1 ] = aVal; curPairIdx += 2; return; } // Do NOT change this to inline device function. // Inline device function was found to be comparitively slower! #define READ_GRID_VALUE( dGrid, gridWidth, loc, value ) \ /* Outer layer */ \ if ( ( loc.x == -1 ) || ( loc.x == gridWidth ) \ || ( loc.y == -1 ) || ( loc.y == gridWidth ) \ || ( loc.z == -1 ) || ( loc.z == gridWidth ) ) \ { \ value = Marker; \ } \ else \ /* Inner region */ \ { \ const int curIdx = coordToIdx( gridWidth, loc ); \ value = dGrid[ curIdx ]; \ } // Read pairs from grid, one thread per row // Invoked twice: // 1: Count tetra // 2: Read tetra __global__ void kerReadGridPairs ( const int* dGrid, int gridWidth, KerInsertionData worksetData, KernelMode mode ) { // 8 voxels and their Voronoi vertices const int Root = 0; const int Opp = 7; const int LinkNum = 6; const int LinkVertex[ LinkNum + 1 ] = { 6, 2, 3, 1, 5, 4, 6 }; int3 loc = ( blockIdx.x <= gridWidth ) ? 
make_int3( threadIdx.x - 1, blockIdx.x - 1, -1 ) : make_int3( gridWidth - 1, threadIdx.x - 1, -1 ); // Read row on other side of grid const int curThreadIdx = getCurThreadIdx(); const int pairIdxBeg = ( CountPerThreadPairs == mode ) ? 0 : worksetData._starVertMap[ curThreadIdx ]; int curPairIdx = pairIdxBeg; int vals[8]; int valIdx = 0; //// // Read one plane (4 voxels) in this row //// for ( int iy = 0; iy <= 1; ++iy ) { for ( int ix = 0; ix <= 1; ++ix ) { const int3 curLoc = make_int3( loc.x + ix, loc.y + iy, loc.z ); READ_GRID_VALUE( dGrid, gridWidth, curLoc, vals[ valIdx ] ); ++valIdx; } } //// // Move along row, using plane (4 voxels) from last read //// // Move along row for ( ; loc.z < gridWidth; ++loc.z ) { // Read 8 voxels around this voxel valIdx = 4; for ( int iy = 0; iy <= 1; ++iy ) { for ( int ix = 0; ix <= 1; ++ix ) { const int3 curLoc = make_int3( loc.x + ix, loc.y + iy, loc.z + 1 ); READ_GRID_VALUE( dGrid, gridWidth, curLoc, vals[ valIdx ] ); ++valIdx; } } // Check the main diagonal const int rootVal = vals[ Root ]; const int oppVal = vals[ Opp ]; const bool hasMarker = ( rootVal == Marker ) || ( oppVal == Marker ); if ( rootVal != oppVal ) { // Check 6 link pairs bool hasQuad = false; int aVal = vals[ LinkVertex[ 0 ] ]; for ( int vi = 0; vi < LinkNum; ++vi ) { const int bVal = vals[ LinkVertex[ vi + 1 ] ]; if ( ( aVal != bVal ) && ( aVal != rootVal ) && ( aVal != oppVal ) && ( bVal != rootVal ) && ( bVal != oppVal ) ) { // Just count if ( CountPerThreadPairs == mode ) { // 10 pairs normally, 6 pairs if either Root or Opp is Marker curPairIdx += ( hasMarker ? 6 : 10 ); } // Just read else { grabPair( worksetData, rootVal, aVal, curPairIdx ); grabPair( worksetData, oppVal, aVal, curPairIdx ); grabPair( worksetData, rootVal, bVal, curPairIdx ); grabPair( worksetData, oppVal, bVal, curPairIdx ); grabPair( worksetData, aVal, bVal, curPairIdx ); } hasQuad = true; } aVal = bVal; } if ( hasQuad && !hasMarker ) // Has a quad { if ( CountPerThreadPairs == mode ) curPairIdx += 2; else grabPair( worksetData, rootVal, oppVal, curPairIdx ); } } // Store plane for next row vals[ 0 ] = vals[ 4 ]; vals[ 1 ] = vals[ 5 ]; vals[ 2 ] = vals[ 6 ]; vals[ 3 ] = vals[ 7 ]; } // Write count of thread if ( CountPerThreadPairs == mode ) worksetData._vertArr[ curThreadIdx ] = curPairIdx - pairIdxBeg; return; } // (1) Mark starving worksets // (2) Estimate workset sizes for all (including starving) __global__ void kerMarkStarvingWorksets ( KerInsertionData worksetData, KerIntArray markArr, KerIntArray worksetSizeArr ) { // Iterate stars for ( int idx = getCurThreadIdx(); idx < worksetData._starNum; idx += getThreadNum() ) { const int setBeg = worksetData._starVertMap[ idx ]; int setEnd = ( ( idx + 1 ) < worksetData._starNum ) ? worksetData._starVertMap[ idx + 1 ] : worksetData._vertNum; if ( ( -1 == setBeg ) || ( -1 == setEnd ) ) setEnd = setBeg; const int setSize = setEnd - setBeg; CudaAssert( ( setSize >= 0 ) && "Invalid set size!" ); //CudaAssert( ( setSize >= 3 ) && "Cannot be less than 3!" 
); // Check if starving if ( ( 0 < setSize ) && ( setSize < MinWorksetSize ) ) { markArr._arr[ idx ] = MinWorksetSize - setSize; // Mark worksetSizeArr._arr[ idx ] = MinWorksetSize; } // Not starving else { markArr._arr[ idx ] = 0; worksetSizeArr._arr[ idx ] = setSize; } } return; } __device__ bool isCandidateInSet ( int vert, KerIntArray vertArr, int vertBeg, int vertNum, int candidateVert ) { if ( vert == candidateVert ) return true; for ( int idx = 0; idx < vertNum; ++idx ) { if ( candidateVert == vertArr._arr[ vertBeg + idx ] ) return true; } return false; } __global__ void kerPumpWorksets ( KerInsertionData worksetData, KerIntArray newMapArr, KerIntArray newVertArr ) { // Iterate stars for ( int star = getCurThreadIdx(); star < worksetData._starNum; star += getThreadNum() ) { const int fromVertBeg = worksetData._starVertMap[ star ]; int fromVertEnd = ( star < ( worksetData._starNum - 1 ) ) ? worksetData._starVertMap[ star + 1 ] : worksetData._vertNum; if ( ( fromVertBeg == -1 ) || ( fromVertEnd == -1 ) ) fromVertEnd = fromVertBeg; const int fromVertNum = fromVertEnd - fromVertBeg; const int toVertBeg = newMapArr._arr[ star ]; const int toVertEnd = ( star < ( worksetData._starNum - 1 ) ) ? newMapArr._arr[ star + 1 ] : newVertArr._num; const int toVertNum = toVertEnd - toVertBeg; // Copy over int toSize = 0; for ( int fromIdx = fromVertBeg; fromIdx < fromVertEnd; ++fromIdx ) { newVertArr._arr[ toVertBeg + toSize ] = worksetData._vertArr[ fromIdx ]; ++toSize; } bool isPumping = ( fromVertNum < toVertNum ); if ( !isPumping ) continue; // For worksets that needs pumping, *pump* it! for ( int fromIdx = fromVertBeg; fromIdx < fromVertEnd; ++fromIdx ) { // Pick from the working set of a neighbor const int neighborVert = worksetData._vertArr[ fromIdx ]; const int neighborVertBeg = worksetData._starVertMap[ neighborVert ]; const int neighborVertEnd = ( neighborVert < ( worksetData._starNum - 1 ) ) ? worksetData._starVertMap[ neighborVert + 1 ] : worksetData._vertNum; for ( int candidateIdx = neighborVertBeg; candidateIdx < neighborVertEnd; ++candidateIdx ) { const int candidateVert = worksetData._vertArr[ candidateIdx ]; // Check if it's already there if ( !isCandidateInSet( star, newVertArr, toVertBeg, toSize, candidateVert ) ) { newVertArr._arr[ toVertBeg + toSize ] = candidateVert; ++toSize; if ( toSize >= toVertNum ) // Enough? { isPumping = false; break; } } } if ( !isPumping ) break; } // If that's still not enough, start considering vertices 0, 1,.... int starIdx = 0; while ( toSize < toVertNum ) { CudaAssert( ( starIdx < worksetData._starNum ) && "Not enough points" ); // Check if it's already there if ( !isCandidateInSet( star, newVertArr, toVertBeg, toSize, starIdx ) ) { // I don't want a missing point to get in here const int starBeg = worksetData._starVertMap[ starIdx ]; const int starEnd = ( starIdx < ( worksetData._starNum - 1 ) ) ? worksetData._starVertMap[ starIdx + 1 ] : worksetData._vertNum; if ( starBeg >= starEnd ) // Missing point continue; newVertArr._arr[ toVertBeg + toSize ] = starIdx; ++toSize; } ++starIdx; } } return; } // Given a list of sorted numbers (has duplicates and is non-contiguous) create a map // Note: The input map *should* already be initialized to -1 !!! 
// Guarantees: // (1) For a number that is in input list, its map value and its next number's map value will be correct // (2) For a number that is not in input list, either its map value is -1, the next one is -1, or size is 0 __global__ void kerMakeAllStarMap ( KerIntArray inArr, KerIntArray allStarMap, int starNum ) { const int curThreadIdx = getCurThreadIdx(); // Iterate input list of numbers for ( int idx = curThreadIdx; idx < inArr._num; idx += getThreadNum() ) { const int curVal = inArr._arr[ idx ]; const int nextVal = ( ( idx + 1 ) < inArr._num ) ? inArr._arr[ idx + 1 ] : starNum - 1; CudaAssert( ( curVal <= nextVal ) && "Input array of numbers is not sorted!" ); // Number changes at this index if ( curVal != nextVal ) { allStarMap._arr[ curVal + 1 ] = idx + 1; allStarMap._arr[ nextVal ] = idx + 1; } } if ( ( 0 == curThreadIdx ) && ( inArr._num > 0 ) ) { const int firstVal = inArr._arr[ 0 ]; allStarMap._arr[ firstVal ] = 0; // Zero index for first value in input list } return; } __global__ void kerMissingPointWorksetSize ( KerPointData pointData, KerInsertionData worksetData, const int* grid, int gridWidth, KerIntArray fromStarArr, KerIntArray newWorksetSizeArr ) { // Iterate through workset stars for ( int star = getCurThreadIdx(); star < worksetData._starNum; star += getThreadNum() ) { const int mapVal = worksetData._starVertMap[ star ]; int nextMapVal = ( star < ( worksetData._starNum - 1 ) ) ? worksetData._starVertMap[ star + 1 ] : worksetData._vertNum; if ( mapVal == -1 || nextMapVal == -1 ) nextMapVal = mapVal; const int starVertNum = nextMapVal - mapVal; // Check if non-missing point if ( starVertNum > 0 ) { fromStarArr._arr[ star ] = star; newWorksetSizeArr._arr[ star ] = starVertNum; } // Missing point else { //// // Convert point to grid location //// const Point3 point = pointData._pointArr[ star ]; const int3 gridCoord = { ( int ) point._p[0], ( int ) point._p[1], ( int ) point._p[2] }; const int gridIdx = coordToIdx( gridWidth, gridCoord ); //// // Get star and its workset at grid location //// CudaAssert( ( gridIdx >= 0 ) && ( gridIdx < ( gridWidth * gridWidth * gridWidth ) ) && "Point out of grid!" ); int winStar = grid[ gridIdx ]; const int winMapVal = worksetData._starVertMap[ winStar ]; int winNextMapVal = ( winStar < ( worksetData._starNum - 1 ) ) ? worksetData._starVertMap[ winStar + 1 ] : worksetData._vertNum; if ( ( -1 == winMapVal ) || ( -1 == winNextMapVal ) ) winNextMapVal = winMapVal; int winVertNum = winNextMapVal - winMapVal; CudaAssert( ( winMapVal >= 0 ) ); CudaAssert( ( winNextMapVal >= 0 ) ); CudaAssert( ( winVertNum > 0 ) && "Winning star workset cannot be empty!" ); //// // Write back new workset size //// if ( winVertNum == 0 ) // Even the winning star is missing, then pick arbitrary 4 points { winVertNum = MinWorksetSize; winStar = -1; } fromStarArr._arr[ star ] = winStar; newWorksetSizeArr._arr[ star ] = winVertNum; } } return; } __global__ void kerMakeMissingPointWorkset ( KerInsertionData worksetData, KerIntArray fromStarArr, KerIntArray newMap, KerIntArray newVertArr ) { // Iterate through stars for ( int star = getCurThreadIdx(); star < worksetData._starNum; star += getThreadNum() ) { const int newVertBeg = newMap._arr[ star ]; const int newVertEnd = ( star < ( worksetData._starNum - 1 ) ) ? newMap._arr[ star + 1 ] : newVertArr._num; const int newStarVertNum = newVertEnd - newVertBeg; CudaAssert( ( newStarVertNum >= MinWorksetSize ) && "Invalid size for star workset!" 
); const int fromStar = fromStarArr._arr[ star ]; if ( fromStar >= 0 ) // We got a winning star { const int fromVertBeg = worksetData._starVertMap[ fromStar ]; const int fromVertEnd = ( fromStar < ( worksetData._starNum - 1 ) ) ? worksetData._starVertMap[ fromStar + 1 ] : worksetData._vertNum; const int fromStarVertNum = fromVertEnd - fromVertBeg; CudaAssert( ( fromStarVertNum == newStarVertNum ) && "Mismatching workset sizes!" ); // Copy from source to destination star for ( int idx = 0; idx < newStarVertNum; ++idx ) { const int fromIdx = fromVertBeg + idx; const int toIdx = newVertBeg + idx; newVertArr._arr[ toIdx ] = worksetData._vertArr[ fromIdx ]; } } else { CudaAssert( false && "Disconnected point." ); CudaAssert( ( newStarVertNum == MinWorksetSize ) && "Invalid size for star workset!" ); int startId = 0; // Create arbitrary workset for ( int idx = 0; idx < newStarVertNum; ++idx ) { if ( startId == star ) ++startId; newVertArr._arr[ newVertBeg + idx ] = startId; ++startId; } } } return; } __global__ void kerGetPerTriangleInsertion ( KerStarData starData, KerInsertionData insertData, KerIntArray activeStarArr, KerIntArray activeTriCountArr, KerIntArray activeTriArr, KerShortArray triInsNumArr ) { // Iterate only active stars for ( int idx = getCurThreadIdx(); idx < activeStarArr._num; idx += getThreadNum() ) { const int star = activeStarArr._arr[ idx ]; //// // Number of insertions //// const int insBeg = insertData._starVertMap[ star ]; int insEnd = ( star < ( starData._starNum - 1 ) ) ? insertData._starVertMap[ star + 1 ] : insertData._vertNum; if ( ( -1 == insBeg ) || ( -1 == insEnd ) ) insEnd = insBeg; const int insCount = insEnd - insBeg; //// // Walk through the triangles //// const StarInfo starInfo = starData.getStarInfo( star ); int activeTriIdx = activeTriCountArr._arr[ idx ]; // Iterate star triangles for ( int locTriIdx = 0; locTriIdx < starInfo._totalSize; ++locTriIdx ) { activeTriArr._arr[ activeTriIdx ] = starInfo.toGlobalTriIdx( locTriIdx ); // Store global index for reuse triInsNumArr._arr[ activeTriIdx ] = insCount; // Store insertion count ++activeTriIdx; } } return; } __device__ TriPositionEx getNextFreeTri ( KerStarData starData, StarInfo starInfo, TriPositionEx prevTriPos ) { // There must be a free triangle while ( true ) { starInfo.moveNextTri( prevTriPos ); const TriangleStatus status = starData.triStatusAt( prevTriPos ); if ( ( Free == status ) || ( Beneath == status ) ) return prevTriPos; } CudaAssert( false && "No free triangle found!" ); return prevTriPos; } // Find first valid triangle adjacent to hole boundary // There must be one! 
__device__ void findFirstHoleSegment ( KerStarData starData, StarInfo starInfo, TriPositionEx beneathTriPos, TriPositionEx& firstTriPos, int& firstVi, TriPositionEx& firstHoleTriPos ) { const TriangleOpp triOppFirst = starData.triOppAt( beneathTriPos ); //// // Use the cached beneath triangle and check if it is on hole boundary //// for ( int vi = 0; vi < 3; ++vi ) { const TriPositionEx oppTriPos = starInfo.locToTriPos( triOppFirst._opp[ vi ] ); const TriangleStatus status = starData.triStatusAt( oppTriPos ); if ( ( Free != status ) && ( Beneath != status ) ) // Found a hole edge { firstTriPos = oppTriPos; firstHoleTriPos = beneathTriPos; TriangleOpp& oppTriOpp = starData.triOppAt( oppTriPos ); firstVi = oppTriOpp.indexOfOpp( starInfo.toLocTriIdx( beneathTriPos ) ); return; } } //// // Iterate through triangles and find one that is on hole boundary //// for ( int locTriIdx = 0; locTriIdx < starInfo._totalSize; ++locTriIdx ) { const TriPositionEx triPos = starInfo.locToTriPos( locTriIdx ); const TriangleStatus status = starData.triStatusAt( triPos ); // Ignore beneath triangles if ( ( Free == status ) || ( Beneath == status ) ) continue; const TriangleOpp triOpp = starData.triOppAt( triPos ); // Iterate segments of beneath triangle for ( int vi = 0; vi < 3; ++vi ) { const TriPositionEx triOppPos = starInfo.locToTriPos( triOpp._opp[ vi ] ); const TriangleStatus status = starData.triStatusAt( triOppPos ); if ( Beneath == status ) // Found a hole edge { firstTriPos = triPos; firstVi = vi; firstHoleTriPos = triOppPos; return; } } } return; } __global__ void kerStitchPointToHole ( KerStarData starData, KerInsertionData insertData, KerIntArray activeStarArr, int insIdx ) { // Reset exact check needed flag if ( ( 0 == threadIdx.x ) && ( 0 == blockIdx.x ) ) { starData._flagArr[ ExactTriNum ] = 0; } // Iterate stars for ( int idx = getCurThreadIdx(); idx < activeStarArr._num; idx += getThreadNum() ) { const int star = activeStarArr._arr[ idx ]; const TriPosition beneathTri = starData._insStatusArr[ star ]; // No insertion needed OR this star is dead if ( -1 == beneathTri ) continue; // Move on const TriPositionEx beneathTriPos = decodeTriPos( beneathTri ); // Mark this as successful insertion (not drowned) const int insIdxBeg = insertData._starVertMap[ star ]; const int insLoc = insIdxBeg + insIdx; const int insVert = insertData._vertArr[ insLoc ]; //// // Find first hole segment //// TriPositionEx firstTriPos; TriPositionEx firstNewTriPos; int firstVi = -1; // Use the first hole triangle to store the first new triangle const StarInfo starInfo = starData.getStarInfo( star ); findFirstHoleSegment( starData, starInfo, beneathTriPos, firstTriPos, firstVi, firstNewTriPos ); //// // Check if insertion point killed star //// if ( -1 == firstVi ) { markStarDeath( starData._deathCertArr[ star ], insVert, beneathTri ); insertData._vertStarArr[ insLoc ] = star; // Mark so that we get death certificate later insertData._vertArr[ insLoc ] = flipVertex( insVert ); // Flip so that we can distinguish between drowned and dead items continue; // Move on } // Mark as successful insertion (not drowned) insertData._vertStarArr[ insLoc ] = -1; //// // First stitched triangle //// // Get the first two vertices of the hole TriPositionEx curTriPos = firstTriPos; const Triangle& curTri = starData.triangleAt( curTriPos ); const int firstVert = curTri._v[ ( firstVi + 1 ) % 3 ]; int curVi = ( firstVi + 2 ) % 3; int curVert = curTri._v[ curVi ]; // Stitch the first triangle const Triangle firstNewTri = { insVert, curVert, 
firstVert }; starData.triangleAt( firstNewTriPos ) = firstNewTri; starData.triStatusAt( firstNewTriPos ) = NewValidAndUnchecked; // Adjancency with opposite triangle TriangleOpp& firstNewTriOpp = starData.triOppAt( firstNewTriPos ); firstNewTriOpp._opp[ 0 ] = starInfo.toLocTriIdx( firstTriPos ); //// // Walk around hole, stitching rest of triangles //// TriPositionEx prevFreeTriPos = makeTriPosEx( starInfo._beg0 - 1, 0 ); TriPositionEx prevNewTriPos = firstNewTriPos; // Walk outside the hole in cw direction while ( curVert != firstVert ) { // Opposite triangle const TriangleOpp& curTriOpp = starData.triOppAt( curTriPos ); const TriPositionEx gloOppTriPos = starInfo.locToTriPos( curTriOpp._opp[ ( curVi + 2 ) % 3 ] ); const TriangleStatus status = starData.triStatusAt( gloOppTriPos ); CudaAssert( Free != status ); // Opposite triangle is valid (not in hole) if ( ( Beneath != status ) && ( NewValidAndUnchecked != status ) ) { // Continue moving const TriangleOpp& oppTriOpp = starData.triOppAt( gloOppTriPos ); const int oppVi = oppTriOpp.indexOfOpp( starInfo.toLocTriIdx( curTriPos ) ); curVi = ( oppVi + 2 ) % 3; curTriPos = gloOppTriPos; } // Hole triangle is found else { TriPositionEx newTriPos; // Find a free triangle if ( Beneath == status ) { newTriPos = gloOppTriPos; } else { newTriPos = getNextFreeTri( starData, starInfo, prevFreeTriPos ); prevFreeTriPos = newTriPos; } // Get the next vertex in the hole boundary const int oppVi = ( curVi + 2 ) % 3; const Triangle& curTri = starData.triangleAt( curTriPos ); const int nextVert = curTri._v[ ( curVi + 1 ) % 3 ]; // New triangle const int locNewTriIdx = starInfo.toLocTriIdx( newTriPos ); const Triangle newTri = { insVert, nextVert, curVert }; // Adjancency with opposite triangle TriangleOpp& curTriOpp = starData.triOppAt( curTriPos ); curTriOpp._opp[ oppVi ] = locNewTriIdx; TriangleOpp& newTriOpp = starData.triOppAt( newTriPos ); newTriOpp._opp[ 0 ] = starInfo.toLocTriIdx( curTriPos ); // Adjacency with previous new triangle TriangleOpp& prevTriOpp = starData.triOppAt( prevNewTriPos ); prevTriOpp._opp[2] = locNewTriIdx; newTriOpp._opp[1] = starInfo.toLocTriIdx( prevNewTriPos ); // Last hole triangle if ( nextVert == firstVert ) { TriangleOpp& firstTriOpp = starData.triOppAt( firstNewTriPos ); firstTriOpp._opp[1] = locNewTriIdx; newTriOpp._opp[2] = starInfo.toLocTriIdx( firstNewTriPos ); } // Store new triangle data starData.triangleAt( newTriPos ) = newTri; starData.triStarAt( newTriPos ) = star; starData.triStatusAt( newTriPos ) = NewValidAndUnchecked; // Prepare for next triangle prevNewTriPos = newTriPos; // Move to the next vertex curVi = ( curVi + 1 ) % 3; curVert = nextVert; } } } return; } __global__ void kerSetBeneathToFree ( KerStarData starData, KerIntArray activeTriArr, KerShortArray triInsNumArr ) { // Iterate triangles for ( int idx = getCurThreadIdx(); idx < activeTriArr._num; idx += getThreadNum() ) { // Ignore triangles that had no insertions if ( 0 == triInsNumArr._arr[ idx ] ) continue; const int triIdx = activeTriArr._arr[ idx ]; const TriPositionEx triPos = starData.globToTriPos( triIdx ); TriangleStatus& triStatus = starData.triStatusAt( triPos ); // Check if beneath triangle if ( Beneath == triStatus ) { const int star = starData.triStarAt( triPos ); // Check if star of triangle is alive if ( Alive == getDeathStatus( starData._deathCertArr[ star ] ) ) triStatus = Free; } } return; } __global__ void kerCountPointsOfStar( KerStarData starData ) { // Iterate through stars for ( int star = getCurThreadIdx(); star < 
starData._starNum; star += getThreadNum() ) { // Ignore dead star if ( Alive != getDeathStatus( starData._deathCertArr[ star ] ) ) continue; // Ignore star with no insertion in this round if ( 0 == starData._insCountArr[ star ] ) continue; const StarInfo starInfo = starData.getStarInfo( star ); int validTriCount = starInfo._totalSize; for ( int locTriIdx = 0; locTriIdx < starInfo._totalSize; ++locTriIdx ) { const TriPositionEx triPos = starInfo.locToTriPos( locTriIdx ); const TriangleStatus triStatus = starData.triStatusAt( triPos ); CudaAssert( ( Beneath != triStatus ) && "No beneath triangle should have survived outside of insertion iterations!" ); if ( Free == triStatus ) --validTriCount; } // Get point count CudaAssert( ( 0 == ( ( validTriCount + 4 ) % 2 ) ) && "2-sphere triangle count not divisible by 2!" ); starData._pointNumArr[ star ] = ( validTriCount + 4 ) / 2; } return; } __device__ bool _checkStarHasVertex ( KerStarData starData, int star, int inVert ) { const StarInfo starInfo = starData.getStarInfo( star ); // Iterate triangles of star for ( int locTriIdx = 0; locTriIdx < starInfo._totalSize; ++locTriIdx ) { const TriPositionEx triPos = starInfo.locToTriPos( locTriIdx ); const TriangleStatus status = starData.triStatusAt( triPos ); const Triangle& tri = starData.triangleAt( triPos ); CudaAssert( ( Beneath != status ) && "Cannot be a dead star!" ); if ( ( Free != status ) && tri.hasVertex( inVert ) ) return true; } return false; } // Check if triangle v0-v1-v2 is in star // If not found, check if vertex in star // Update the vertex to -1 if it doesn't need to be inserted. // Return the number of insertion required, or -1 if consistent __device__ int _checkStarHasFacetOrPoint ( KerStarData starData, TriPositionEx fromTriLoc, int toIdxInFrom, int toStar, int& fromVert, int& v1, int& v2 ) { const StarInfo toStarInfo = starData.getStarInfo( toStar ); int hasVert = 0; // Iterate triangles of star for ( int toLocTriIdx = 0; toLocTriIdx < toStarInfo._totalSize; ++toLocTriIdx ) { const TriPositionEx triPos = toStarInfo.locToTriPos( toLocTriIdx ); const TriangleStatus status = starData.triStatusAt( triPos ); // Ignore free triangle if ( Free == status ) continue; CudaAssert( ( Beneath != status ) && "Star is dead! Cannot happen! Should have been ignored during facet generation!" ); // Check for vertex in triangle const Triangle tri = starData.triangleAt( triPos ); const int triHasVert = ( tri.hasVertex( fromVert ) ) + ( tri.hasVertex( v1 ) << 1 ) + ( tri.hasVertex( v2 ) << 2 ); hasVert |= triHasVert; if ( 0x7 == triHasVert ) // All three bits are 1 { // Consistent, note down the index starData.triOppTetraAt( fromTriLoc )._opp[ toIdxInFrom ] = toLocTriIdx; return -1; // Get out! } } if ( hasVert & 0x01 ) fromVert = -1; if ( hasVert & 0x02 ) v1 = -1; if ( hasVert & 0x04 ) v2 = -1; return 3 - __popc( hasVert ); // __popc gives the number of 1s in the binary representation } // First half of the facet processing logic __global__ void kerGetFacetInsertCount ( KerStarData starData, KerFacetData facetData ) { // Iterate facet items for ( int facetIdx = getCurThreadIdx(); facetIdx < facetData._num; facetIdx += getThreadNum() ) { // Read facet item const int toStar = facetData._toStarArr[ facetIdx ]; int& fromStar = facetData._fromStarArr[ facetIdx ]; // Will be modified below int insertPointNum = -1; // Drowned point OR dead star if ( facetIdx < facetData._drownedFacetNum ) { CudaAssert( ( toStar < 0 ) && "Drowned/dead to-star should be negative value!" 
); // Reset to positive const int posToStar = flipVertex( toStar ); // Check if star was killed and its killer vert is in this pair if ( fromStar < 0 ) { CudaAssert( ( DeadNeedCert == getDeathStatus( starData._deathCertArr[ posToStar ] ) ) && "Killed star has to be in NeedCert state!" ); insertPointNum = 0; // No insertions, certificate is generated by another kernel } // Check if drowner star has just died else if ( Alive != getDeathStatus( starData._deathCertArr[ posToStar ] ) ) { insertPointNum = 0; } // Check if drowned point has died else if ( Alive != getDeathStatus( starData._deathCertArr[ fromStar ] ) ) { insertPointNum = 0; } // Both are not dead, check if drowner is still in link of drowned point else if ( _checkStarHasVertex( starData, fromStar, posToStar ) ) { insertPointNum = ProofPointsPerStar; } else { insertPointNum = 0; } } // Normal facet item else { CudaAssert( ( toStar >= 0 ) && ( fromStar >= 0 ) && "Invalid from or to stars!" ); Segment& seg = facetData._segArr[ facetIdx ]; // Will be modified below int triIdx = facetData._fromTriArr[ facetIdx ]; // Decode const int triStarIdx = triIdx & 0x03; triIdx >>= 2; //// // Check if to-star is already dead //// const int toDeathStatus = getDeathStatus( starData._deathCertArr[ toStar ] ); if ( Alive != toDeathStatus ) { insertPointNum = DeathCertSize; facetData._toStarArr[ facetIdx ] = flipVertex( toStar ); // Note down death by negating to-star } else { // Check if to-star has facet const TriPositionEx triPos = starData.globToTriPos( triIdx ); insertPointNum = _checkStarHasFacetOrPoint( starData, triPos, triStarIdx, toStar, fromStar, seg._v[0], seg._v[1] ); // Triangle not found in to-star if ( insertPointNum != -1 ) { // Raise the corresponding triangle as unchecked starData.triStatusAt( triPos ) = ValidAndUnchecked; } // Triangle found, is consistent else { insertPointNum = 0; } } } // Set status of facet facetData._insertMapArr[ facetIdx ] = insertPointNum; } return; } // Generate facet-item count only for valid triangles // Free and Beneath triangles are ignored __global__ void kerGetValidFacetCount ( KerStarData starData, KerIntArray countArr ) { const int FacetsPerTriangle = 3; // Iterate triangles for ( int triIdx = getCurThreadIdx(); triIdx < starData._totalTriNum; triIdx += getThreadNum() ) { const TriPositionEx triPos = starData.globToTriPos( triIdx ); const TriangleStatus triStatus = starData.triStatusAt( triPos ); const bool doCheckTri = ( ValidAndUnchecked == triStatus ) || ( NewValidAndUnchecked == triStatus ); countArr._arr[ triIdx ] = ( doCheckTri ? 
FacetsPerTriangle : 0 ); } return; } __global__ void kerMakeFacetFromDrownedPoint ( KerStarData starData, KerInsertionData insertData, KerFacetData facetData ) { // Iterate drowned points for ( int idx = getCurThreadIdx(); idx < facetData._drownedFacetNum; idx += getThreadNum() ) { // Insert drowned point disguised as facet item // *Swap* from-star and to-star so that later kernels are easy facetData._fromStarArr[ idx ] = insertData._vertArr[ idx ]; // Flip toStar so it appears at front of facet list after sorting facetData._toStarArr[ idx ] = flipVertex( insertData._vertStarArr[ idx ] ); } return; } __global__ void kerGetValidFacets ( KerStarData starData, KerIntArray facetMap, KerFacetData facetData ) { // Facet count begins here in count array const int facetCountBeg = facetData._drownedFacetNum; // Iterate triangles for ( int triIdx = getCurThreadIdx(); triIdx < starData._totalTriNum; triIdx += getThreadNum() ) { int facetIdxBeg = facetCountBeg + facetMap._arr[ triIdx ]; int facetIdxEnd = ( triIdx < ( starData._totalTriNum - 1 ) ) ? ( facetCountBeg + facetMap._arr[ triIdx + 1 ] ) : facetData._num; int facetIdx = facetIdxBeg; // Get out if beyond facets or FacetMax bound (whichever is smaller) if ( facetIdxBeg >= facetData._num ) break; // No need to check this triangle if ( facetIdxEnd == facetIdxBeg ) continue; //// // Add 3 facet items for this triangle //// const TriPositionEx triPos = starData.globToTriPos( triIdx ); TriangleStatus& triStatus = starData.triStatusAt( triPos ); const Triangle tri = starData.triangleAt( triPos ); const int star = starData.triStarAt( triPos ); CudaAssert( ( Free != triStatus ) && ( Beneath != triStatus ) ); // Set as valid since we are going to check it // Will be invalid again if found out during the check triStatus = Valid; // Iterate triangle vertices for ( int vi = 0; vi < 3; ++vi ) { const int toStar = tri._v[ vi ]; const Segment seg = { tri._v[ ( vi + 1 ) % 3 ], tri._v[ ( vi + 2 ) % 3 ] }; // Facet item facetData._fromStarArr[ facetIdx ] = star; facetData._toStarArr[ facetIdx ] = toStar; facetData._fromTriArr[ facetIdx ] = ( triIdx << 2 ) | vi; // Encode orientation facetData._segArr[ facetIdx ] = seg; ++facetIdx; } } return; } __global__ void kerGetPerFacetInsertions ( KerStarData starData, KerFacetData facetData, KerInsertionData insertData ) { // Iterate through non-drowned facets for ( int facetIdx = facetData._drownedFacetNum + getCurThreadIdx(); facetIdx < facetData._num; facetIdx += getThreadNum() ) { int insertIdxBeg = facetData._insertMapArr[ facetIdx ]; const int insertIdxEnd = ( facetIdx < ( facetData._num - 1 ) ) ? facetData._insertMapArr[ facetIdx + 1 ] : insertData._vertNum; // No insertions for this facet if ( insertIdxBeg == insertIdxEnd ) continue; //// // Point insertions //// const int toStar = facetData._toStarArr[ facetIdx ]; const int fromStar = facetData._fromStarArr[ facetIdx ]; // Set to -1 by kerGetFacetInsertCount if found in to-star const Segment seg = facetData._segArr[ facetIdx ]; // Set to -1 by kerGetFacetInsertCount if found in to-star const int triad[3] = { fromStar, seg._v[0], seg._v[1] }; // To-star is dead if ( toStar < 0 ) { const int posToStar = flipVertex( toStar ); CudaAssert( ( getDeathStatus( starData._deathCertArr[ posToStar ] ) >= 0 ) && "To-Star must be dead with certificate!" ); CudaAssert( ( DeathCertSize == ( insertIdxEnd - insertIdxBeg ) ) && "Invalid insertion size for to-star death!" 
); const DeathCert& deathCert = starData._deathCertArr[ posToStar ]; //// // Store death certificate as insertions //// for ( int i = 0; i < DeathCertSize; ++i ) { const int certVert = deathCert._v[ i ]; insertData._vertStarArr[ insertIdxBeg ] = fromStar; insertData._vertArr[ insertIdxBeg ] = flipVertex( certVert ); // Negate to check if in star later ++insertIdxBeg; } } // To-star is alive else { for ( int i = 0; i < 3; ++i ) { const int vert = triad[i]; // Vertex not in to-star if ( vert != -1 ) { // Insert this point since it is not there yet insertData._vertStarArr[ insertIdxBeg ] = toStar; insertData._vertArr[ insertIdxBeg ] = vert; ++insertIdxBeg; } } CudaAssert( ( insertIdxBeg == insertIdxEnd ) && "Invalid number of insertions for PointInconsistent" ); } } return; } // Insertions that do not pass inspection remain negative, and are removed during compaction __global__ void kerCheckCertInsertions ( KerStarData starData, KerInsertionData insertData ) { // Iterate insertions for ( int idx = getCurThreadIdx(); idx < insertData._vertNum; idx += getThreadNum() ) { const int insVert = insertData._vertArr[ idx ]; // Ignore vertices that need no inspection // Note: Only death certificate insertions need inspection if ( insVert >= 0 ) continue; const int insStar = insertData._vertStarArr[ idx ]; const int posInsVert = flipVertex( insVert ); // 1: Ensure insert vertex is not same as star if ( insStar == posInsVert ) continue; // 2: Ensure insert vertex is not in link of star if ( _checkStarHasVertex( starData, insStar, posInsVert ) ) continue; // Make insert vertex positive, so it is not removed during compaction insertData._vertArr[ idx ] = posInsVert; } return; } __global__ void kerCountPerStarInsertions ( KerStarData starData, KerInsertionData insertData ) { for ( int star = getCurThreadIdx(); star < starData._starNum; star += getThreadNum() ) { //// // Update point number to be inserted AND drowned number //// const int insBeg = insertData._starVertMap[ star ]; int insEnd = ( star < ( starData._starNum - 1 ) ) ? insertData._starVertMap[ star + 1 ] : insertData._vertNum; if ( ( -1 == insBeg ) || ( -1 == insEnd ) ) insEnd = insBeg; const int insPointNum = insEnd - insBeg; CudaAssert( ( insPointNum >= 0 ) && "Invalid indices!" 
); // Insert point count for this star starData._insCountArr[ star ] = insPointNum; //// // Drowned count for this star // Given star of n link points and m insertion points, only a maximum // of (n + m - 4) points can drown //// const int starPointNum = starData._pointNumArr[ star ]; const int totalPointNum = starPointNum + insPointNum; // Update star point count starData._pointNumArr[ star ] = totalPointNum; } return; } __global__ void kerComputeTriangleCount ( KerStarData starData, KerIntArray triNumArr ) { const float ExpandFactor = 1.0f; // Iterate through stars for ( int star = getCurThreadIdx(); star < starData._starNum; star += getThreadNum() ) { // Current number of triangles const StarInfo starInfo = starData.getStarInfo( star ); const int curTriNum = starInfo._totalSize; // Expected number of points (current + expected insertions) const int expPointNum = starData._pointNumArr[ star ]; // Expected number of triangles const int insTriNum = get2SphereTriangleNum( 1, expPointNum ); const int newTriNum = insTriNum * ExpandFactor; triNumArr._arr[ star ] = max( newTriNum, curTriNum ) - starInfo._size0; // Only "expand" second array } return; } __global__ void kerMarkOwnedTriangles ( KerStarData starData, KerIntArray tetraTriMap ) { // Iterate triangles for ( int triIdx = getCurThreadIdx(); triIdx < starData._totalTriNum; triIdx += getThreadNum() ) { const TriPositionEx triPos = starData.globToTriPos( triIdx ); const TriangleStatus status = starData.triStatusAt( triPos ); if ( ( Free == status ) || ( Beneath == status ) ) continue; // Check if star is triangle owner const Triangle tri = starData.triangleAt( triPos ); const int star = starData.triStarAt( triPos ); if ( ( star < tri._v[0] ) & ( star < tri._v[1] ) & ( star < tri._v[2] ) ) tetraTriMap._arr[ triIdx ] = triIdx; } return; } __global__ void kerGrabTetrasFromStars ( KerStarData starData, KerTetraData tetraData, KerIntArray validArr, KerIntArray triTetraMap, KerIntArray tetraTriMap, KerIntArray compressedIndex, int tetraNum ) { // Iterate list of all triangles for ( int idx = getCurThreadIdx(); idx < tetraNum; idx += getThreadNum() ) { if ( 0 == validArr._arr[ idx ] ) continue; // Construct the 4 vertices of the tetra const TriPositionEx triPos = starData.globToTriPos( tetraTriMap._arr[ idx ] ); const int star = starData.triStarAt( triPos ); const StarInfo starInfo = starData.getStarInfo( star ); const Triangle tri = starData.triangleAt( triPos ); CudaAssert( Valid == starData.triStatusAt( triPos ) ); Tetrahedron tetra; const int v0 = tri._v[0]; tetra._v[0] = v0; tetra._v[1] = tri._v[1]; tetra._v[2] = tri._v[2]; tetra._v[3] = star; const TriangleOpp triOpp = starData.triOppAt( triPos ); for ( int vi = 0; vi < 3; ++vi ) { const int gloOppTriIdx = starInfo.toGlobalTriIdx( triOpp._opp[ vi ] ); const int oppTetraIdx = triTetraMap._arr[ gloOppTriIdx ]; tetra._opp[ vi ] = ( -1 == oppTetraIdx ) ? 
-1 : compressedIndex._arr[ oppTetraIdx ]; } // Locate the triangle in _v[ 0 ]'s star const TriOppTetra& oppTetra = starData.triOppTetraAt( triPos ); const StarInfo v0Info = starData.getStarInfo( v0 ); const TriPositionEx v0TriPos = v0Info.locToTriPos( oppTetra._opp[ 0 ] ); // Find the tetra opposite _v[ 3 ] const Triangle v0tri = starData.triangleAt( v0TriPos ); const int starIdx = v0tri.indexOfVert( star ); const TriangleOpp& v0triOpp = starData.triOppAt( v0TriPos ); const int starTriOppIdx = v0Info.toGlobalTriIdx( v0triOpp._opp[ starIdx ] ); const int starOppTetraIdx = triTetraMap._arr[ starTriOppIdx ]; tetra._opp[ 3 ] = ( -1 == starOppTetraIdx ) ? -1 : compressedIndex._arr[ starOppTetraIdx ]; // Write tetra tetraData._arr[ compressedIndex._arr[ idx ] ] = tetra; } return; } __global__ void kerInvalidateFreeTriangles( KerStarData starData ) { // Iterate through triangles for ( int triIdx = getCurThreadIdx(); triIdx < starData._totalTriNum; triIdx += getThreadNum() ) { const TriPositionEx triPos = starData.globToTriPos( triIdx ); const TriangleStatus triStatus = starData.triStatusAt( triPos ); if ( ( Free == triStatus ) || ( Beneath == triStatus ) ) { Triangle& tri = starData.triangleAt( triPos ); // Invalidate triangle, so it can be detected during consistency check tri._v[0] = -1; } } return; } __global__ void kerCheckStarConsistency( KerStarData starData ) { // Iterate all triangles for ( int triIdx = getCurThreadIdx(); triIdx < starData._totalTriNum; triIdx += getThreadNum() ) { //// // Ignore free triangles //// const TriPositionEx triPos = starData.globToTriPos( triIdx ); TriangleStatus& triStatus = starData.triStatusAt( triPos ); if ( ( Free == triStatus ) || ( Beneath == triStatus ) ) continue; CudaAssert( ( Valid == triStatus ) && "Triangles should be valid at this stage!" ); //// // Check if triangle is consistent //// const int star = starData.triStarAt( triPos ); const Triangle tri = starData.triangleAt( triPos ); const TriOppTetra triOppTetra = starData.triOppTetraAt( triPos ); for ( int vi = 0; vi < 3; ++vi ) { const int toVert = tri._v[ vi ]; const StarInfo starInfo = starData.getStarInfo( toVert ); const TriPositionEx oppTriPos = starInfo.locToTriPos( triOppTetra._opp[ vi ] ); const Triangle oppTri = starData.triangleAt( oppTriPos ); // Check if triangle matches // Note: Free-Beneath triangle._v[0] has been set to -1, so they fail const bool consistent = oppTri.hasVertex( star ) && oppTri.hasVertex( tri._v[ ( vi + 1 ) % 3 ] ) && oppTri.hasVertex( tri._v[ ( vi + 2 ) % 3 ] ); if ( !consistent ) { starData._flagArr[ ExactTriNum ] = 1; triStatus = ValidAndUnchecked; break; } } } return; } __global__ void kerAppendValueToKey( KerInsertionData insertData, int bitsPerValue ) { const int ValMask = 1 << bitsPerValue; // Iterate array for ( int idx = getCurThreadIdx(); idx < insertData._vertNum; idx += getThreadNum() ) { const int key = insertData._vertStarArr[ idx ]; const int val = insertData._vertArr[ idx ]; CudaAssert( ( key >= 0 ) && "Invalid key!" ); insertData._vertStarArr[ idx ] = ( ( key << bitsPerValue ) | ( val & ( ValMask - 1 ) ) ); } return; } __global__ void kerRemoveValueFromKey( KerInsertionData insertData, int bitsPerValue ) { // Iterate array for ( int idx = getCurThreadIdx(); idx < insertData._vertNum; idx += getThreadNum() ) { const int keyvalue = insertData._vertStarArr[ idx ]; CudaAssert( ( keyvalue >= 0 ) && "Key-Value is invalid!" 
); insertData._vertStarArr[ idx ] = ( keyvalue >> bitsPerValue ); } return; } __global__ void kerMarkDuplicates( KerIntArray keyArr, KerIntArray valueArr ) { // Iterate array for ( int idx = getCurThreadIdx(); idx < keyArr._num; idx += getThreadNum() ) { const int key = keyArr._arr[ idx ]; const int val = valueArr._arr[ idx ]; int nextIdx = idx + 1; while ( nextIdx < keyArr._num ) { const int nextKey = keyArr._arr[ nextIdx ]; if ( nextKey != key ) break; const int nextVal = valueArr._arr[ nextIdx ]; if ( ( val == nextVal ) || ( val == flipVertex( nextVal ) ) ) { valueArr._arr[ idx ] = flipVertex( val ); break; } ++nextIdx; } } return; } // Make map AND also update triStar and triStatus array __global__ void kerMakeOldNewTriMap ( KerStarData oldStarData, int oldTriNum, KerIntArray newTriMap, KerIntArray oldNewMap, KerIntArray newTriStar, KerTriStatusArray newTriStatus ) { // Iterate through triangles for ( int oldTriIdx = getCurThreadIdx(); oldTriIdx < oldTriNum; oldTriIdx += getThreadNum() ) { //// // Skip copying free triangle information //// const TriangleStatus status = oldStarData._triStatusArr[1][ oldTriIdx ]; if ( Free == status ) continue; //// // Make map //// const int starIdx = oldStarData._triStarArr[1][ oldTriIdx ]; // Star const int oldTriBeg = oldStarData._starTriMap[1][ starIdx ]; // Old location const int newTriBeg = newTriMap._arr[ starIdx ]; // New location const int newTriIdx = oldTriIdx - oldTriBeg + newTriBeg; // New location oldNewMap._arr[ oldTriIdx ] = newTriIdx; newTriStar._arr[ newTriIdx ] = starIdx; newTriStatus._arr[ newTriIdx ] = status; } return; } __global__ void kerGetActiveTriCount ( KerStarData starData, KerIntArray activeStarArr, KerIntArray activeTriCountArr ) { // Iterate stars for ( int idx = getCurThreadIdx(); idx < activeStarArr._num; idx += getThreadNum() ) { const int star = activeStarArr._arr[ idx ]; const StarInfo starInfo = starData.getStarInfo( star ); activeTriCountArr._arr[ idx ] = starInfo._totalSize; } return; } ////////////////////////////////////////////////////////////////////////////////
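//
// Added note (hedged, not from the original sources): kerAppendValueToKey and
// kerRemoveValueFromKey above implement a pack/sort/unpack idiom. Each vertex
// value is appended to the low bits of its star key so that a single key sort
// orders all (star, vertex) insertion pairs, after which the key is restored.
// The host-side sequence below is purely illustrative; the bit width, launch
// shape, and the use of thrust::sort_by_key are assumptions, not the project's
// actual pipeline.
//
// const int bitsPerValue = 20;   // assumption: wide enough for the vertex range
// hipLaunchKernelGGL( kerAppendValueToKey, dim3( 512 ), dim3( 256 ), 0, 0,
//                     insertData, bitsPerValue );
// thrust::sort_by_key( thrust::device,
//                      insertData._vertStarArr,                        // packed keys
//                      insertData._vertStarArr + insertData._vertNum,
//                      insertData._vertArr );                          // payload
// hipLaunchKernelGGL( kerRemoveValueFromKey, dim3( 512 ), dim3( 256 ), 0, 0,
//                     insertData, bitsPerValue );
//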
2169e73adfa5e8592ecf21ec9a92d52a31fac58c.cu
/* Author: Ashwin Nanjappa Filename: GDelKernels.cu Copyright (c) 2013, School of Computing, National University of Singapore. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the National University of Singapore nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission from the National University of Singapore. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ //////////////////////////////////////////////////////////////////////////////// // Star Device Code //////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////// Headers // #include "GDelInternal.h" #include "GDelKernels.h" #include "Geometry.h" ///////////////////////////////////////////////////////////////////// Kernels // __forceinline__ __device__ void grabPair ( KerInsertionData worksetData, int aVal, int bVal, int& curPairIdx ) { CudaAssert( aVal != bVal ); if ( aVal == Marker ) return; worksetData._vertStarArr[ curPairIdx ] = aVal; worksetData._vertArr [ curPairIdx ] = bVal; worksetData._vertStarArr[ curPairIdx + 1 ] = bVal; worksetData._vertArr [ curPairIdx + 1 ] = aVal; curPairIdx += 2; return; } // Do NOT change this to inline device function. // Inline device function was found to be comparitively slower! #define READ_GRID_VALUE( dGrid, gridWidth, loc, value ) \ /* Outer layer */ \ if ( ( loc.x == -1 ) || ( loc.x == gridWidth ) \ || ( loc.y == -1 ) || ( loc.y == gridWidth ) \ || ( loc.z == -1 ) || ( loc.z == gridWidth ) ) \ { \ value = Marker; \ } \ else \ /* Inner region */ \ { \ const int curIdx = coordToIdx( gridWidth, loc ); \ value = dGrid[ curIdx ]; \ } // Read pairs from grid, one thread per row // Invoked twice: // 1: Count tetra // 2: Read tetra __global__ void kerReadGridPairs ( const int* dGrid, int gridWidth, KerInsertionData worksetData, KernelMode mode ) { // 8 voxels and their Voronoi vertices const int Root = 0; const int Opp = 7; const int LinkNum = 6; const int LinkVertex[ LinkNum + 1 ] = { 6, 2, 3, 1, 5, 4, 6 }; int3 loc = ( blockIdx.x <= gridWidth ) ? 
make_int3( threadIdx.x - 1, blockIdx.x - 1, -1 ) : make_int3( gridWidth - 1, threadIdx.x - 1, -1 ); // Read row on other side of grid const int curThreadIdx = getCurThreadIdx(); const int pairIdxBeg = ( CountPerThreadPairs == mode ) ? 0 : worksetData._starVertMap[ curThreadIdx ]; int curPairIdx = pairIdxBeg; int vals[8]; int valIdx = 0; //// // Read one plane (4 voxels) in this row //// for ( int iy = 0; iy <= 1; ++iy ) { for ( int ix = 0; ix <= 1; ++ix ) { const int3 curLoc = make_int3( loc.x + ix, loc.y + iy, loc.z ); READ_GRID_VALUE( dGrid, gridWidth, curLoc, vals[ valIdx ] ); ++valIdx; } } //// // Move along row, using plane (4 voxels) from last read //// // Move along row for ( ; loc.z < gridWidth; ++loc.z ) { // Read 8 voxels around this voxel valIdx = 4; for ( int iy = 0; iy <= 1; ++iy ) { for ( int ix = 0; ix <= 1; ++ix ) { const int3 curLoc = make_int3( loc.x + ix, loc.y + iy, loc.z + 1 ); READ_GRID_VALUE( dGrid, gridWidth, curLoc, vals[ valIdx ] ); ++valIdx; } } // Check the main diagonal const int rootVal = vals[ Root ]; const int oppVal = vals[ Opp ]; const bool hasMarker = ( rootVal == Marker ) || ( oppVal == Marker ); if ( rootVal != oppVal ) { // Check 6 link pairs bool hasQuad = false; int aVal = vals[ LinkVertex[ 0 ] ]; for ( int vi = 0; vi < LinkNum; ++vi ) { const int bVal = vals[ LinkVertex[ vi + 1 ] ]; if ( ( aVal != bVal ) && ( aVal != rootVal ) && ( aVal != oppVal ) && ( bVal != rootVal ) && ( bVal != oppVal ) ) { // Just count if ( CountPerThreadPairs == mode ) { // 10 pairs normally, 6 pairs if either Root or Opp is Marker curPairIdx += ( hasMarker ? 6 : 10 ); } // Just read else { grabPair( worksetData, rootVal, aVal, curPairIdx ); grabPair( worksetData, oppVal, aVal, curPairIdx ); grabPair( worksetData, rootVal, bVal, curPairIdx ); grabPair( worksetData, oppVal, bVal, curPairIdx ); grabPair( worksetData, aVal, bVal, curPairIdx ); } hasQuad = true; } aVal = bVal; } if ( hasQuad && !hasMarker ) // Has a quad { if ( CountPerThreadPairs == mode ) curPairIdx += 2; else grabPair( worksetData, rootVal, oppVal, curPairIdx ); } } // Store plane for next row vals[ 0 ] = vals[ 4 ]; vals[ 1 ] = vals[ 5 ]; vals[ 2 ] = vals[ 6 ]; vals[ 3 ] = vals[ 7 ]; } // Write count of thread if ( CountPerThreadPairs == mode ) worksetData._vertArr[ curThreadIdx ] = curPairIdx - pairIdxBeg; return; } // (1) Mark starving worksets // (2) Estimate workset sizes for all (including starving) __global__ void kerMarkStarvingWorksets ( KerInsertionData worksetData, KerIntArray markArr, KerIntArray worksetSizeArr ) { // Iterate stars for ( int idx = getCurThreadIdx(); idx < worksetData._starNum; idx += getThreadNum() ) { const int setBeg = worksetData._starVertMap[ idx ]; int setEnd = ( ( idx + 1 ) < worksetData._starNum ) ? worksetData._starVertMap[ idx + 1 ] : worksetData._vertNum; if ( ( -1 == setBeg ) || ( -1 == setEnd ) ) setEnd = setBeg; const int setSize = setEnd - setBeg; CudaAssert( ( setSize >= 0 ) && "Invalid set size!" ); //CudaAssert( ( setSize >= 3 ) && "Cannot be less than 3!" 
); // Check if starving if ( ( 0 < setSize ) && ( setSize < MinWorksetSize ) ) { markArr._arr[ idx ] = MinWorksetSize - setSize; // Mark worksetSizeArr._arr[ idx ] = MinWorksetSize; } // Not starving else { markArr._arr[ idx ] = 0; worksetSizeArr._arr[ idx ] = setSize; } } return; } __device__ bool isCandidateInSet ( int vert, KerIntArray vertArr, int vertBeg, int vertNum, int candidateVert ) { if ( vert == candidateVert ) return true; for ( int idx = 0; idx < vertNum; ++idx ) { if ( candidateVert == vertArr._arr[ vertBeg + idx ] ) return true; } return false; } __global__ void kerPumpWorksets ( KerInsertionData worksetData, KerIntArray newMapArr, KerIntArray newVertArr ) { // Iterate stars for ( int star = getCurThreadIdx(); star < worksetData._starNum; star += getThreadNum() ) { const int fromVertBeg = worksetData._starVertMap[ star ]; int fromVertEnd = ( star < ( worksetData._starNum - 1 ) ) ? worksetData._starVertMap[ star + 1 ] : worksetData._vertNum; if ( ( fromVertBeg == -1 ) || ( fromVertEnd == -1 ) ) fromVertEnd = fromVertBeg; const int fromVertNum = fromVertEnd - fromVertBeg; const int toVertBeg = newMapArr._arr[ star ]; const int toVertEnd = ( star < ( worksetData._starNum - 1 ) ) ? newMapArr._arr[ star + 1 ] : newVertArr._num; const int toVertNum = toVertEnd - toVertBeg; // Copy over int toSize = 0; for ( int fromIdx = fromVertBeg; fromIdx < fromVertEnd; ++fromIdx ) { newVertArr._arr[ toVertBeg + toSize ] = worksetData._vertArr[ fromIdx ]; ++toSize; } bool isPumping = ( fromVertNum < toVertNum ); if ( !isPumping ) continue; // For worksets that needs pumping, *pump* it! for ( int fromIdx = fromVertBeg; fromIdx < fromVertEnd; ++fromIdx ) { // Pick from the working set of a neighbor const int neighborVert = worksetData._vertArr[ fromIdx ]; const int neighborVertBeg = worksetData._starVertMap[ neighborVert ]; const int neighborVertEnd = ( neighborVert < ( worksetData._starNum - 1 ) ) ? worksetData._starVertMap[ neighborVert + 1 ] : worksetData._vertNum; for ( int candidateIdx = neighborVertBeg; candidateIdx < neighborVertEnd; ++candidateIdx ) { const int candidateVert = worksetData._vertArr[ candidateIdx ]; // Check if it's already there if ( !isCandidateInSet( star, newVertArr, toVertBeg, toSize, candidateVert ) ) { newVertArr._arr[ toVertBeg + toSize ] = candidateVert; ++toSize; if ( toSize >= toVertNum ) // Enough? { isPumping = false; break; } } } if ( !isPumping ) break; } // If that's still not enough, start considering vertices 0, 1,.... int starIdx = 0; while ( toSize < toVertNum ) { CudaAssert( ( starIdx < worksetData._starNum ) && "Not enough points" ); // Check if it's already there if ( !isCandidateInSet( star, newVertArr, toVertBeg, toSize, starIdx ) ) { // I don't want a missing point to get in here const int starBeg = worksetData._starVertMap[ starIdx ]; const int starEnd = ( starIdx < ( worksetData._starNum - 1 ) ) ? worksetData._starVertMap[ starIdx + 1 ] : worksetData._vertNum; if ( starBeg >= starEnd ) // Missing point continue; newVertArr._arr[ toVertBeg + toSize ] = starIdx; ++toSize; } ++starIdx; } } return; } // Given a list of sorted numbers (has duplicates and is non-contiguous) create a map // Note: The input map *should* already be initialized to -1 !!! 
// Guarantees: // (1) For a number that is in input list, its map value and its next number's map value will be correct // (2) For a number that is not in input list, either its map value is -1, the next one is -1, or size is 0 __global__ void kerMakeAllStarMap ( KerIntArray inArr, KerIntArray allStarMap, int starNum ) { const int curThreadIdx = getCurThreadIdx(); // Iterate input list of numbers for ( int idx = curThreadIdx; idx < inArr._num; idx += getThreadNum() ) { const int curVal = inArr._arr[ idx ]; const int nextVal = ( ( idx + 1 ) < inArr._num ) ? inArr._arr[ idx + 1 ] : starNum - 1; CudaAssert( ( curVal <= nextVal ) && "Input array of numbers is not sorted!" ); // Number changes at this index if ( curVal != nextVal ) { allStarMap._arr[ curVal + 1 ] = idx + 1; allStarMap._arr[ nextVal ] = idx + 1; } } if ( ( 0 == curThreadIdx ) && ( inArr._num > 0 ) ) { const int firstVal = inArr._arr[ 0 ]; allStarMap._arr[ firstVal ] = 0; // Zero index for first value in input list } return; } __global__ void kerMissingPointWorksetSize ( KerPointData pointData, KerInsertionData worksetData, const int* grid, int gridWidth, KerIntArray fromStarArr, KerIntArray newWorksetSizeArr ) { // Iterate through workset stars for ( int star = getCurThreadIdx(); star < worksetData._starNum; star += getThreadNum() ) { const int mapVal = worksetData._starVertMap[ star ]; int nextMapVal = ( star < ( worksetData._starNum - 1 ) ) ? worksetData._starVertMap[ star + 1 ] : worksetData._vertNum; if ( mapVal == -1 || nextMapVal == -1 ) nextMapVal = mapVal; const int starVertNum = nextMapVal - mapVal; // Check if non-missing point if ( starVertNum > 0 ) { fromStarArr._arr[ star ] = star; newWorksetSizeArr._arr[ star ] = starVertNum; } // Missing point else { //// // Convert point to grid location //// const Point3 point = pointData._pointArr[ star ]; const int3 gridCoord = { ( int ) point._p[0], ( int ) point._p[1], ( int ) point._p[2] }; const int gridIdx = coordToIdx( gridWidth, gridCoord ); //// // Get star and its workset at grid location //// CudaAssert( ( gridIdx >= 0 ) && ( gridIdx < ( gridWidth * gridWidth * gridWidth ) ) && "Point out of grid!" ); int winStar = grid[ gridIdx ]; const int winMapVal = worksetData._starVertMap[ winStar ]; int winNextMapVal = ( winStar < ( worksetData._starNum - 1 ) ) ? worksetData._starVertMap[ winStar + 1 ] : worksetData._vertNum; if ( ( -1 == winMapVal ) || ( -1 == winNextMapVal ) ) winNextMapVal = winMapVal; int winVertNum = winNextMapVal - winMapVal; CudaAssert( ( winMapVal >= 0 ) ); CudaAssert( ( winNextMapVal >= 0 ) ); CudaAssert( ( winVertNum > 0 ) && "Winning star workset cannot be empty!" ); //// // Write back new workset size //// if ( winVertNum == 0 ) // Even the winning star is missing, then pick arbitrary 4 points { winVertNum = MinWorksetSize; winStar = -1; } fromStarArr._arr[ star ] = winStar; newWorksetSizeArr._arr[ star ] = winVertNum; } } return; } __global__ void kerMakeMissingPointWorkset ( KerInsertionData worksetData, KerIntArray fromStarArr, KerIntArray newMap, KerIntArray newVertArr ) { // Iterate through stars for ( int star = getCurThreadIdx(); star < worksetData._starNum; star += getThreadNum() ) { const int newVertBeg = newMap._arr[ star ]; const int newVertEnd = ( star < ( worksetData._starNum - 1 ) ) ? newMap._arr[ star + 1 ] : newVertArr._num; const int newStarVertNum = newVertEnd - newVertBeg; CudaAssert( ( newStarVertNum >= MinWorksetSize ) && "Invalid size for star workset!" 
); const int fromStar = fromStarArr._arr[ star ]; if ( fromStar >= 0 ) // We got a winning star { const int fromVertBeg = worksetData._starVertMap[ fromStar ]; const int fromVertEnd = ( fromStar < ( worksetData._starNum - 1 ) ) ? worksetData._starVertMap[ fromStar + 1 ] : worksetData._vertNum; const int fromStarVertNum = fromVertEnd - fromVertBeg; CudaAssert( ( fromStarVertNum == newStarVertNum ) && "Mismatching workset sizes!" ); // Copy from source to destination star for ( int idx = 0; idx < newStarVertNum; ++idx ) { const int fromIdx = fromVertBeg + idx; const int toIdx = newVertBeg + idx; newVertArr._arr[ toIdx ] = worksetData._vertArr[ fromIdx ]; } } else { CudaAssert( false && "Disconnected point." ); CudaAssert( ( newStarVertNum == MinWorksetSize ) && "Invalid size for star workset!" ); int startId = 0; // Create arbitrary workset for ( int idx = 0; idx < newStarVertNum; ++idx ) { if ( startId == star ) ++startId; newVertArr._arr[ newVertBeg + idx ] = startId; ++startId; } } } return; } __global__ void kerGetPerTriangleInsertion ( KerStarData starData, KerInsertionData insertData, KerIntArray activeStarArr, KerIntArray activeTriCountArr, KerIntArray activeTriArr, KerShortArray triInsNumArr ) { // Iterate only active stars for ( int idx = getCurThreadIdx(); idx < activeStarArr._num; idx += getThreadNum() ) { const int star = activeStarArr._arr[ idx ]; //// // Number of insertions //// const int insBeg = insertData._starVertMap[ star ]; int insEnd = ( star < ( starData._starNum - 1 ) ) ? insertData._starVertMap[ star + 1 ] : insertData._vertNum; if ( ( -1 == insBeg ) || ( -1 == insEnd ) ) insEnd = insBeg; const int insCount = insEnd - insBeg; //// // Walk through the triangles //// const StarInfo starInfo = starData.getStarInfo( star ); int activeTriIdx = activeTriCountArr._arr[ idx ]; // Iterate star triangles for ( int locTriIdx = 0; locTriIdx < starInfo._totalSize; ++locTriIdx ) { activeTriArr._arr[ activeTriIdx ] = starInfo.toGlobalTriIdx( locTriIdx ); // Store global index for reuse triInsNumArr._arr[ activeTriIdx ] = insCount; // Store insertion count ++activeTriIdx; } } return; } __device__ TriPositionEx getNextFreeTri ( KerStarData starData, StarInfo starInfo, TriPositionEx prevTriPos ) { // There must be a free triangle while ( true ) { starInfo.moveNextTri( prevTriPos ); const TriangleStatus status = starData.triStatusAt( prevTriPos ); if ( ( Free == status ) || ( Beneath == status ) ) return prevTriPos; } CudaAssert( false && "No free triangle found!" ); return prevTriPos; } // Find first valid triangle adjacent to hole boundary // There must be one! 
__device__ void findFirstHoleSegment ( KerStarData starData, StarInfo starInfo, TriPositionEx beneathTriPos, TriPositionEx& firstTriPos, int& firstVi, TriPositionEx& firstHoleTriPos ) { const TriangleOpp triOppFirst = starData.triOppAt( beneathTriPos ); //// // Use the cached beneath triangle and check if it is on hole boundary //// for ( int vi = 0; vi < 3; ++vi ) { const TriPositionEx oppTriPos = starInfo.locToTriPos( triOppFirst._opp[ vi ] ); const TriangleStatus status = starData.triStatusAt( oppTriPos ); if ( ( Free != status ) && ( Beneath != status ) ) // Found a hole edge { firstTriPos = oppTriPos; firstHoleTriPos = beneathTriPos; TriangleOpp& oppTriOpp = starData.triOppAt( oppTriPos ); firstVi = oppTriOpp.indexOfOpp( starInfo.toLocTriIdx( beneathTriPos ) ); return; } } //// // Iterate through triangles and find one that is on hole boundary //// for ( int locTriIdx = 0; locTriIdx < starInfo._totalSize; ++locTriIdx ) { const TriPositionEx triPos = starInfo.locToTriPos( locTriIdx ); const TriangleStatus status = starData.triStatusAt( triPos ); // Ignore beneath triangles if ( ( Free == status ) || ( Beneath == status ) ) continue; const TriangleOpp triOpp = starData.triOppAt( triPos ); // Iterate segments of beneath triangle for ( int vi = 0; vi < 3; ++vi ) { const TriPositionEx triOppPos = starInfo.locToTriPos( triOpp._opp[ vi ] ); const TriangleStatus status = starData.triStatusAt( triOppPos ); if ( Beneath == status ) // Found a hole edge { firstTriPos = triPos; firstVi = vi; firstHoleTriPos = triOppPos; return; } } } return; } __global__ void kerStitchPointToHole ( KerStarData starData, KerInsertionData insertData, KerIntArray activeStarArr, int insIdx ) { // Reset exact check needed flag if ( ( 0 == threadIdx.x ) && ( 0 == blockIdx.x ) ) { starData._flagArr[ ExactTriNum ] = 0; } // Iterate stars for ( int idx = getCurThreadIdx(); idx < activeStarArr._num; idx += getThreadNum() ) { const int star = activeStarArr._arr[ idx ]; const TriPosition beneathTri = starData._insStatusArr[ star ]; // No insertion needed OR this star is dead if ( -1 == beneathTri ) continue; // Move on const TriPositionEx beneathTriPos = decodeTriPos( beneathTri ); // Mark this as successful insertion (not drowned) const int insIdxBeg = insertData._starVertMap[ star ]; const int insLoc = insIdxBeg + insIdx; const int insVert = insertData._vertArr[ insLoc ]; //// // Find first hole segment //// TriPositionEx firstTriPos; TriPositionEx firstNewTriPos; int firstVi = -1; // Use the first hole triangle to store the first new triangle const StarInfo starInfo = starData.getStarInfo( star ); findFirstHoleSegment( starData, starInfo, beneathTriPos, firstTriPos, firstVi, firstNewTriPos ); //// // Check if insertion point killed star //// if ( -1 == firstVi ) { markStarDeath( starData._deathCertArr[ star ], insVert, beneathTri ); insertData._vertStarArr[ insLoc ] = star; // Mark so that we get death certificate later insertData._vertArr[ insLoc ] = flipVertex( insVert ); // Flip so that we can distinguish between drowned and dead items continue; // Move on } // Mark as successful insertion (not drowned) insertData._vertStarArr[ insLoc ] = -1; //// // First stitched triangle //// // Get the first two vertices of the hole TriPositionEx curTriPos = firstTriPos; const Triangle& curTri = starData.triangleAt( curTriPos ); const int firstVert = curTri._v[ ( firstVi + 1 ) % 3 ]; int curVi = ( firstVi + 2 ) % 3; int curVert = curTri._v[ curVi ]; // Stitch the first triangle const Triangle firstNewTri = { insVert, curVert, 
firstVert }; starData.triangleAt( firstNewTriPos ) = firstNewTri; starData.triStatusAt( firstNewTriPos ) = NewValidAndUnchecked; // Adjancency with opposite triangle TriangleOpp& firstNewTriOpp = starData.triOppAt( firstNewTriPos ); firstNewTriOpp._opp[ 0 ] = starInfo.toLocTriIdx( firstTriPos ); //// // Walk around hole, stitching rest of triangles //// TriPositionEx prevFreeTriPos = makeTriPosEx( starInfo._beg0 - 1, 0 ); TriPositionEx prevNewTriPos = firstNewTriPos; // Walk outside the hole in cw direction while ( curVert != firstVert ) { // Opposite triangle const TriangleOpp& curTriOpp = starData.triOppAt( curTriPos ); const TriPositionEx gloOppTriPos = starInfo.locToTriPos( curTriOpp._opp[ ( curVi + 2 ) % 3 ] ); const TriangleStatus status = starData.triStatusAt( gloOppTriPos ); CudaAssert( Free != status ); // Opposite triangle is valid (not in hole) if ( ( Beneath != status ) && ( NewValidAndUnchecked != status ) ) { // Continue moving const TriangleOpp& oppTriOpp = starData.triOppAt( gloOppTriPos ); const int oppVi = oppTriOpp.indexOfOpp( starInfo.toLocTriIdx( curTriPos ) ); curVi = ( oppVi + 2 ) % 3; curTriPos = gloOppTriPos; } // Hole triangle is found else { TriPositionEx newTriPos; // Find a free triangle if ( Beneath == status ) { newTriPos = gloOppTriPos; } else { newTriPos = getNextFreeTri( starData, starInfo, prevFreeTriPos ); prevFreeTriPos = newTriPos; } // Get the next vertex in the hole boundary const int oppVi = ( curVi + 2 ) % 3; const Triangle& curTri = starData.triangleAt( curTriPos ); const int nextVert = curTri._v[ ( curVi + 1 ) % 3 ]; // New triangle const int locNewTriIdx = starInfo.toLocTriIdx( newTriPos ); const Triangle newTri = { insVert, nextVert, curVert }; // Adjancency with opposite triangle TriangleOpp& curTriOpp = starData.triOppAt( curTriPos ); curTriOpp._opp[ oppVi ] = locNewTriIdx; TriangleOpp& newTriOpp = starData.triOppAt( newTriPos ); newTriOpp._opp[ 0 ] = starInfo.toLocTriIdx( curTriPos ); // Adjacency with previous new triangle TriangleOpp& prevTriOpp = starData.triOppAt( prevNewTriPos ); prevTriOpp._opp[2] = locNewTriIdx; newTriOpp._opp[1] = starInfo.toLocTriIdx( prevNewTriPos ); // Last hole triangle if ( nextVert == firstVert ) { TriangleOpp& firstTriOpp = starData.triOppAt( firstNewTriPos ); firstTriOpp._opp[1] = locNewTriIdx; newTriOpp._opp[2] = starInfo.toLocTriIdx( firstNewTriPos ); } // Store new triangle data starData.triangleAt( newTriPos ) = newTri; starData.triStarAt( newTriPos ) = star; starData.triStatusAt( newTriPos ) = NewValidAndUnchecked; // Prepare for next triangle prevNewTriPos = newTriPos; // Move to the next vertex curVi = ( curVi + 1 ) % 3; curVert = nextVert; } } } return; } __global__ void kerSetBeneathToFree ( KerStarData starData, KerIntArray activeTriArr, KerShortArray triInsNumArr ) { // Iterate triangles for ( int idx = getCurThreadIdx(); idx < activeTriArr._num; idx += getThreadNum() ) { // Ignore triangles that had no insertions if ( 0 == triInsNumArr._arr[ idx ] ) continue; const int triIdx = activeTriArr._arr[ idx ]; const TriPositionEx triPos = starData.globToTriPos( triIdx ); TriangleStatus& triStatus = starData.triStatusAt( triPos ); // Check if beneath triangle if ( Beneath == triStatus ) { const int star = starData.triStarAt( triPos ); // Check if star of triangle is alive if ( Alive == getDeathStatus( starData._deathCertArr[ star ] ) ) triStatus = Free; } } return; } __global__ void kerCountPointsOfStar( KerStarData starData ) { // Iterate through stars for ( int star = getCurThreadIdx(); star < 
starData._starNum; star += getThreadNum() ) { // Ignore dead star if ( Alive != getDeathStatus( starData._deathCertArr[ star ] ) ) continue; // Ignore star with no insertion in this round if ( 0 == starData._insCountArr[ star ] ) continue; const StarInfo starInfo = starData.getStarInfo( star ); int validTriCount = starInfo._totalSize; for ( int locTriIdx = 0; locTriIdx < starInfo._totalSize; ++locTriIdx ) { const TriPositionEx triPos = starInfo.locToTriPos( locTriIdx ); const TriangleStatus triStatus = starData.triStatusAt( triPos ); CudaAssert( ( Beneath != triStatus ) && "No beneath triangle should have survived outside of insertion iterations!" ); if ( Free == triStatus ) --validTriCount; } // Get point count CudaAssert( ( 0 == ( ( validTriCount + 4 ) % 2 ) ) && "2-sphere triangle count not divisible by 2!" ); starData._pointNumArr[ star ] = ( validTriCount + 4 ) / 2; } return; } __device__ bool _checkStarHasVertex ( KerStarData starData, int star, int inVert ) { const StarInfo starInfo = starData.getStarInfo( star ); // Iterate triangles of star for ( int locTriIdx = 0; locTriIdx < starInfo._totalSize; ++locTriIdx ) { const TriPositionEx triPos = starInfo.locToTriPos( locTriIdx ); const TriangleStatus status = starData.triStatusAt( triPos ); const Triangle& tri = starData.triangleAt( triPos ); CudaAssert( ( Beneath != status ) && "Cannot be a dead star!" ); if ( ( Free != status ) && tri.hasVertex( inVert ) ) return true; } return false; } // Check if triangle v0-v1-v2 is in star // If not found, check if vertex in star // Update the vertex to -1 if it doesn't need to be inserted. // Return the number of insertion required, or -1 if consistent __device__ int _checkStarHasFacetOrPoint ( KerStarData starData, TriPositionEx fromTriLoc, int toIdxInFrom, int toStar, int& fromVert, int& v1, int& v2 ) { const StarInfo toStarInfo = starData.getStarInfo( toStar ); int hasVert = 0; // Iterate triangles of star for ( int toLocTriIdx = 0; toLocTriIdx < toStarInfo._totalSize; ++toLocTriIdx ) { const TriPositionEx triPos = toStarInfo.locToTriPos( toLocTriIdx ); const TriangleStatus status = starData.triStatusAt( triPos ); // Ignore free triangle if ( Free == status ) continue; CudaAssert( ( Beneath != status ) && "Star is dead! Cannot happen! Should have been ignored during facet generation!" ); // Check for vertex in triangle const Triangle tri = starData.triangleAt( triPos ); const int triHasVert = ( tri.hasVertex( fromVert ) ) + ( tri.hasVertex( v1 ) << 1 ) + ( tri.hasVertex( v2 ) << 2 ); hasVert |= triHasVert; if ( 0x7 == triHasVert ) // All three bits are 1 { // Consistent, note down the index starData.triOppTetraAt( fromTriLoc )._opp[ toIdxInFrom ] = toLocTriIdx; return -1; // Get out! } } if ( hasVert & 0x01 ) fromVert = -1; if ( hasVert & 0x02 ) v1 = -1; if ( hasVert & 0x04 ) v2 = -1; return 3 - __popc( hasVert ); // __popc gives the number of 1s in the binary representation } // First half of the facet processing logic __global__ void kerGetFacetInsertCount ( KerStarData starData, KerFacetData facetData ) { // Iterate facet items for ( int facetIdx = getCurThreadIdx(); facetIdx < facetData._num; facetIdx += getThreadNum() ) { // Read facet item const int toStar = facetData._toStarArr[ facetIdx ]; int& fromStar = facetData._fromStarArr[ facetIdx ]; // Will be modified below int insertPointNum = -1; // Drowned point OR dead star if ( facetIdx < facetData._drownedFacetNum ) { CudaAssert( ( toStar < 0 ) && "Drowned/dead to-star should be negative value!" 
); // Reset to positive const int posToStar = flipVertex( toStar ); // Check if star was killed and its killer vert is in this pair if ( fromStar < 0 ) { CudaAssert( ( DeadNeedCert == getDeathStatus( starData._deathCertArr[ posToStar ] ) ) && "Killed star has to be in NeedCert state!" ); insertPointNum = 0; // No insertions, certificate is generated by another kernel } // Check if drowner star has just died else if ( Alive != getDeathStatus( starData._deathCertArr[ posToStar ] ) ) { insertPointNum = 0; } // Check if drowned point has died else if ( Alive != getDeathStatus( starData._deathCertArr[ fromStar ] ) ) { insertPointNum = 0; } // Both are not dead, check if drowner is still in link of drowned point else if ( _checkStarHasVertex( starData, fromStar, posToStar ) ) { insertPointNum = ProofPointsPerStar; } else { insertPointNum = 0; } } // Normal facet item else { CudaAssert( ( toStar >= 0 ) && ( fromStar >= 0 ) && "Invalid from or to stars!" ); Segment& seg = facetData._segArr[ facetIdx ]; // Will be modified below int triIdx = facetData._fromTriArr[ facetIdx ]; // Decode const int triStarIdx = triIdx & 0x03; triIdx >>= 2; //// // Check if to-star is already dead //// const int toDeathStatus = getDeathStatus( starData._deathCertArr[ toStar ] ); if ( Alive != toDeathStatus ) { insertPointNum = DeathCertSize; facetData._toStarArr[ facetIdx ] = flipVertex( toStar ); // Note down death by negating to-star } else { // Check if to-star has facet const TriPositionEx triPos = starData.globToTriPos( triIdx ); insertPointNum = _checkStarHasFacetOrPoint( starData, triPos, triStarIdx, toStar, fromStar, seg._v[0], seg._v[1] ); // Triangle not found in to-star if ( insertPointNum != -1 ) { // Raise the corresponding triangle as unchecked starData.triStatusAt( triPos ) = ValidAndUnchecked; } // Triangle found, is consistent else { insertPointNum = 0; } } } // Set status of facet facetData._insertMapArr[ facetIdx ] = insertPointNum; } return; } // Generate facet-item count only for valid triangles // Free and Beneath triangles are ignored __global__ void kerGetValidFacetCount ( KerStarData starData, KerIntArray countArr ) { const int FacetsPerTriangle = 3; // Iterate triangles for ( int triIdx = getCurThreadIdx(); triIdx < starData._totalTriNum; triIdx += getThreadNum() ) { const TriPositionEx triPos = starData.globToTriPos( triIdx ); const TriangleStatus triStatus = starData.triStatusAt( triPos ); const bool doCheckTri = ( ValidAndUnchecked == triStatus ) || ( NewValidAndUnchecked == triStatus ); countArr._arr[ triIdx ] = ( doCheckTri ? 
FacetsPerTriangle : 0 ); } return; } __global__ void kerMakeFacetFromDrownedPoint ( KerStarData starData, KerInsertionData insertData, KerFacetData facetData ) { // Iterate drowned points for ( int idx = getCurThreadIdx(); idx < facetData._drownedFacetNum; idx += getThreadNum() ) { // Insert drowned point disguised as facet item // *Swap* from-star and to-star so that later kernels are easy facetData._fromStarArr[ idx ] = insertData._vertArr[ idx ]; // Flip toStar so it appears at front of facet list after sorting facetData._toStarArr[ idx ] = flipVertex( insertData._vertStarArr[ idx ] ); } return; } __global__ void kerGetValidFacets ( KerStarData starData, KerIntArray facetMap, KerFacetData facetData ) { // Facet count begins here in count array const int facetCountBeg = facetData._drownedFacetNum; // Iterate triangles for ( int triIdx = getCurThreadIdx(); triIdx < starData._totalTriNum; triIdx += getThreadNum() ) { int facetIdxBeg = facetCountBeg + facetMap._arr[ triIdx ]; int facetIdxEnd = ( triIdx < ( starData._totalTriNum - 1 ) ) ? ( facetCountBeg + facetMap._arr[ triIdx + 1 ] ) : facetData._num; int facetIdx = facetIdxBeg; // Get out if beyond facets or FacetMax bound (whichever is smaller) if ( facetIdxBeg >= facetData._num ) break; // No need to check this triangle if ( facetIdxEnd == facetIdxBeg ) continue; //// // Add 3 facet items for this triangle //// const TriPositionEx triPos = starData.globToTriPos( triIdx ); TriangleStatus& triStatus = starData.triStatusAt( triPos ); const Triangle tri = starData.triangleAt( triPos ); const int star = starData.triStarAt( triPos ); CudaAssert( ( Free != triStatus ) && ( Beneath != triStatus ) ); // Set as valid since we are going to check it // Will be invalid again if found out during the check triStatus = Valid; // Iterate triangle vertices for ( int vi = 0; vi < 3; ++vi ) { const int toStar = tri._v[ vi ]; const Segment seg = { tri._v[ ( vi + 1 ) % 3 ], tri._v[ ( vi + 2 ) % 3 ] }; // Facet item facetData._fromStarArr[ facetIdx ] = star; facetData._toStarArr[ facetIdx ] = toStar; facetData._fromTriArr[ facetIdx ] = ( triIdx << 2 ) | vi; // Encode orientation facetData._segArr[ facetIdx ] = seg; ++facetIdx; } } return; } __global__ void kerGetPerFacetInsertions ( KerStarData starData, KerFacetData facetData, KerInsertionData insertData ) { // Iterate through non-drowned facets for ( int facetIdx = facetData._drownedFacetNum + getCurThreadIdx(); facetIdx < facetData._num; facetIdx += getThreadNum() ) { int insertIdxBeg = facetData._insertMapArr[ facetIdx ]; const int insertIdxEnd = ( facetIdx < ( facetData._num - 1 ) ) ? facetData._insertMapArr[ facetIdx + 1 ] : insertData._vertNum; // No insertions for this facet if ( insertIdxBeg == insertIdxEnd ) continue; //// // Point insertions //// const int toStar = facetData._toStarArr[ facetIdx ]; const int fromStar = facetData._fromStarArr[ facetIdx ]; // Set to -1 by kerGetFacetInsertCount if found in to-star const Segment seg = facetData._segArr[ facetIdx ]; // Set to -1 by kerGetFacetInsertCount if found in to-star const int triad[3] = { fromStar, seg._v[0], seg._v[1] }; // To-star is dead if ( toStar < 0 ) { const int posToStar = flipVertex( toStar ); CudaAssert( ( getDeathStatus( starData._deathCertArr[ posToStar ] ) >= 0 ) && "To-Star must be dead with certificate!" ); CudaAssert( ( DeathCertSize == ( insertIdxEnd - insertIdxBeg ) ) && "Invalid insertion size for to-star death!" 
); const DeathCert& deathCert = starData._deathCertArr[ posToStar ]; //// // Store death certificate as insertions //// for ( int i = 0; i < DeathCertSize; ++i ) { const int certVert = deathCert._v[ i ]; insertData._vertStarArr[ insertIdxBeg ] = fromStar; insertData._vertArr[ insertIdxBeg ] = flipVertex( certVert ); // Negate to check if in star later ++insertIdxBeg; } } // To-star is alive else { for ( int i = 0; i < 3; ++i ) { const int vert = triad[i]; // Vertex not in to-star if ( vert != -1 ) { // Insert this point since it is not there yet insertData._vertStarArr[ insertIdxBeg ] = toStar; insertData._vertArr[ insertIdxBeg ] = vert; ++insertIdxBeg; } } CudaAssert( ( insertIdxBeg == insertIdxEnd ) && "Invalid number of insertions for PointInconsistent" ); } } return; } // Insertions that do not pass inspection remain negative, and are removed during compaction __global__ void kerCheckCertInsertions ( KerStarData starData, KerInsertionData insertData ) { // Iterate insertions for ( int idx = getCurThreadIdx(); idx < insertData._vertNum; idx += getThreadNum() ) { const int insVert = insertData._vertArr[ idx ]; // Ignore vertices that need no inspection // Note: Only death certificate insertions need inspection if ( insVert >= 0 ) continue; const int insStar = insertData._vertStarArr[ idx ]; const int posInsVert = flipVertex( insVert ); // 1: Ensure insert vertex is not same as star if ( insStar == posInsVert ) continue; // 2: Ensure insert vertex is not in link of star if ( _checkStarHasVertex( starData, insStar, posInsVert ) ) continue; // Make insert vertex positive, so it is not removed during compaction insertData._vertArr[ idx ] = posInsVert; } return; } __global__ void kerCountPerStarInsertions ( KerStarData starData, KerInsertionData insertData ) { for ( int star = getCurThreadIdx(); star < starData._starNum; star += getThreadNum() ) { //// // Update point number to be inserted AND drowned number //// const int insBeg = insertData._starVertMap[ star ]; int insEnd = ( star < ( starData._starNum - 1 ) ) ? insertData._starVertMap[ star + 1 ] : insertData._vertNum; if ( ( -1 == insBeg ) || ( -1 == insEnd ) ) insEnd = insBeg; const int insPointNum = insEnd - insBeg; CudaAssert( ( insPointNum >= 0 ) && "Invalid indices!" 
); // Insert point count for this star starData._insCountArr[ star ] = insPointNum; //// // Drowned count for this star // Given star of n link points and m insertion points, only a maximum // of (n + m - 4) points can drown //// const int starPointNum = starData._pointNumArr[ star ]; const int totalPointNum = starPointNum + insPointNum; // Update star point count starData._pointNumArr[ star ] = totalPointNum; } return; } __global__ void kerComputeTriangleCount ( KerStarData starData, KerIntArray triNumArr ) { const float ExpandFactor = 1.0f; // Iterate through stars for ( int star = getCurThreadIdx(); star < starData._starNum; star += getThreadNum() ) { // Current number of triangles const StarInfo starInfo = starData.getStarInfo( star ); const int curTriNum = starInfo._totalSize; // Expected number of points (current + expected insertions) const int expPointNum = starData._pointNumArr[ star ]; // Expected number of triangles const int insTriNum = get2SphereTriangleNum( 1, expPointNum ); const int newTriNum = insTriNum * ExpandFactor; triNumArr._arr[ star ] = max( newTriNum, curTriNum ) - starInfo._size0; // Only "expand" second array } return; } __global__ void kerMarkOwnedTriangles ( KerStarData starData, KerIntArray tetraTriMap ) { // Iterate triangles for ( int triIdx = getCurThreadIdx(); triIdx < starData._totalTriNum; triIdx += getThreadNum() ) { const TriPositionEx triPos = starData.globToTriPos( triIdx ); const TriangleStatus status = starData.triStatusAt( triPos ); if ( ( Free == status ) || ( Beneath == status ) ) continue; // Check if star is triangle owner const Triangle tri = starData.triangleAt( triPos ); const int star = starData.triStarAt( triPos ); if ( ( star < tri._v[0] ) & ( star < tri._v[1] ) & ( star < tri._v[2] ) ) tetraTriMap._arr[ triIdx ] = triIdx; } return; } __global__ void kerGrabTetrasFromStars ( KerStarData starData, KerTetraData tetraData, KerIntArray validArr, KerIntArray triTetraMap, KerIntArray tetraTriMap, KerIntArray compressedIndex, int tetraNum ) { // Iterate list of all triangles for ( int idx = getCurThreadIdx(); idx < tetraNum; idx += getThreadNum() ) { if ( 0 == validArr._arr[ idx ] ) continue; // Construct the 4 vertices of the tetra const TriPositionEx triPos = starData.globToTriPos( tetraTriMap._arr[ idx ] ); const int star = starData.triStarAt( triPos ); const StarInfo starInfo = starData.getStarInfo( star ); const Triangle tri = starData.triangleAt( triPos ); CudaAssert( Valid == starData.triStatusAt( triPos ) ); Tetrahedron tetra; const int v0 = tri._v[0]; tetra._v[0] = v0; tetra._v[1] = tri._v[1]; tetra._v[2] = tri._v[2]; tetra._v[3] = star; const TriangleOpp triOpp = starData.triOppAt( triPos ); for ( int vi = 0; vi < 3; ++vi ) { const int gloOppTriIdx = starInfo.toGlobalTriIdx( triOpp._opp[ vi ] ); const int oppTetraIdx = triTetraMap._arr[ gloOppTriIdx ]; tetra._opp[ vi ] = ( -1 == oppTetraIdx ) ? 
-1 : compressedIndex._arr[ oppTetraIdx ]; } // Locate the triangle in _v[ 0 ]'s star const TriOppTetra& oppTetra = starData.triOppTetraAt( triPos ); const StarInfo v0Info = starData.getStarInfo( v0 ); const TriPositionEx v0TriPos = v0Info.locToTriPos( oppTetra._opp[ 0 ] ); // Find the tetra opposite _v[ 3 ] const Triangle v0tri = starData.triangleAt( v0TriPos ); const int starIdx = v0tri.indexOfVert( star ); const TriangleOpp& v0triOpp = starData.triOppAt( v0TriPos ); const int starTriOppIdx = v0Info.toGlobalTriIdx( v0triOpp._opp[ starIdx ] ); const int starOppTetraIdx = triTetraMap._arr[ starTriOppIdx ]; tetra._opp[ 3 ] = ( -1 == starOppTetraIdx ) ? -1 : compressedIndex._arr[ starOppTetraIdx ]; // Write tetra tetraData._arr[ compressedIndex._arr[ idx ] ] = tetra; } return; } __global__ void kerInvalidateFreeTriangles( KerStarData starData ) { // Iterate through triangles for ( int triIdx = getCurThreadIdx(); triIdx < starData._totalTriNum; triIdx += getThreadNum() ) { const TriPositionEx triPos = starData.globToTriPos( triIdx ); const TriangleStatus triStatus = starData.triStatusAt( triPos ); if ( ( Free == triStatus ) || ( Beneath == triStatus ) ) { Triangle& tri = starData.triangleAt( triPos ); // Invalidate triangle, so it can be detected during consistency check tri._v[0] = -1; } } return; } __global__ void kerCheckStarConsistency( KerStarData starData ) { // Iterate all triangles for ( int triIdx = getCurThreadIdx(); triIdx < starData._totalTriNum; triIdx += getThreadNum() ) { //// // Ignore free triangles //// const TriPositionEx triPos = starData.globToTriPos( triIdx ); TriangleStatus& triStatus = starData.triStatusAt( triPos ); if ( ( Free == triStatus ) || ( Beneath == triStatus ) ) continue; CudaAssert( ( Valid == triStatus ) && "Triangles should be valid at this stage!" ); //// // Check if triangle is consistent //// const int star = starData.triStarAt( triPos ); const Triangle tri = starData.triangleAt( triPos ); const TriOppTetra triOppTetra = starData.triOppTetraAt( triPos ); for ( int vi = 0; vi < 3; ++vi ) { const int toVert = tri._v[ vi ]; const StarInfo starInfo = starData.getStarInfo( toVert ); const TriPositionEx oppTriPos = starInfo.locToTriPos( triOppTetra._opp[ vi ] ); const Triangle oppTri = starData.triangleAt( oppTriPos ); // Check if triangle matches // Note: Free-Beneath triangle._v[0] has been set to -1, so they fail const bool consistent = oppTri.hasVertex( star ) && oppTri.hasVertex( tri._v[ ( vi + 1 ) % 3 ] ) && oppTri.hasVertex( tri._v[ ( vi + 2 ) % 3 ] ); if ( !consistent ) { starData._flagArr[ ExactTriNum ] = 1; triStatus = ValidAndUnchecked; break; } } } return; } __global__ void kerAppendValueToKey( KerInsertionData insertData, int bitsPerValue ) { const int ValMask = 1 << bitsPerValue; // Iterate array for ( int idx = getCurThreadIdx(); idx < insertData._vertNum; idx += getThreadNum() ) { const int key = insertData._vertStarArr[ idx ]; const int val = insertData._vertArr[ idx ]; CudaAssert( ( key >= 0 ) && "Invalid key!" ); insertData._vertStarArr[ idx ] = ( ( key << bitsPerValue ) | ( val & ( ValMask - 1 ) ) ); } return; } __global__ void kerRemoveValueFromKey( KerInsertionData insertData, int bitsPerValue ) { // Iterate array for ( int idx = getCurThreadIdx(); idx < insertData._vertNum; idx += getThreadNum() ) { const int keyvalue = insertData._vertStarArr[ idx ]; CudaAssert( ( keyvalue >= 0 ) && "Key-Value is invalid!" 
); insertData._vertStarArr[ idx ] = ( keyvalue >> bitsPerValue ); } return; } __global__ void kerMarkDuplicates( KerIntArray keyArr, KerIntArray valueArr ) { // Iterate array for ( int idx = getCurThreadIdx(); idx < keyArr._num; idx += getThreadNum() ) { const int key = keyArr._arr[ idx ]; const int val = valueArr._arr[ idx ]; int nextIdx = idx + 1; while ( nextIdx < keyArr._num ) { const int nextKey = keyArr._arr[ nextIdx ]; if ( nextKey != key ) break; const int nextVal = valueArr._arr[ nextIdx ]; if ( ( val == nextVal ) || ( val == flipVertex( nextVal ) ) ) { valueArr._arr[ idx ] = flipVertex( val ); break; } ++nextIdx; } } return; } // Make map AND also update triStar and triStatus array __global__ void kerMakeOldNewTriMap ( KerStarData oldStarData, int oldTriNum, KerIntArray newTriMap, KerIntArray oldNewMap, KerIntArray newTriStar, KerTriStatusArray newTriStatus ) { // Iterate through triangles for ( int oldTriIdx = getCurThreadIdx(); oldTriIdx < oldTriNum; oldTriIdx += getThreadNum() ) { //// // Skip copying free triangle information //// const TriangleStatus status = oldStarData._triStatusArr[1][ oldTriIdx ]; if ( Free == status ) continue; //// // Make map //// const int starIdx = oldStarData._triStarArr[1][ oldTriIdx ]; // Star const int oldTriBeg = oldStarData._starTriMap[1][ starIdx ]; // Old location const int newTriBeg = newTriMap._arr[ starIdx ]; // New location const int newTriIdx = oldTriIdx - oldTriBeg + newTriBeg; // New location oldNewMap._arr[ oldTriIdx ] = newTriIdx; newTriStar._arr[ newTriIdx ] = starIdx; newTriStatus._arr[ newTriIdx ] = status; } return; } __global__ void kerGetActiveTriCount ( KerStarData starData, KerIntArray activeStarArr, KerIntArray activeTriCountArr ) { // Iterate stars for ( int idx = getCurThreadIdx(); idx < activeStarArr._num; idx += getThreadNum() ) { const int star = activeStarArr._arr[ idx ]; const StarInfo starInfo = starData.getStarInfo( star ); activeTriCountArr._arr[ idx ] = starInfo._totalSize; } return; } ////////////////////////////////////////////////////////////////////////////////
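//
// Added note (hedged, not part of the original file): kerGetValidFacets packs a
// facet's owning triangle index and vertex slot into a single int with
// ( triIdx << 2 ) | vi, and kerGetFacetInsertCount recovers them with & 0x03 and
// >> 2. The helpers below only restate that encoding for clarity; their names
// are illustrative and they are not used elsewhere in this file.
//
static __host__ __device__ int exampleEncodeFacetRef( int triIdx, int vi )
{
    // vi is in [0,2], so it fits in the low 2 bits
    return ( triIdx << 2 ) | vi;
}

static __host__ __device__ void exampleDecodeFacetRef( int code, int& triIdx, int& vi )
{
    vi     = code & 0x03;   // vertex slot within the triangle
    triIdx = code >> 2;     // global triangle index
}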
331628e0fb44211a2c7e3bd7421790382bc96304.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include "image_template.h" #include <math.h> #include <sys/time.h> #define T 16 //A consolidated function that creates both the Gaussian kernel and the derivative kernel void create_gaussians(float **gaussian_kernel,float **gaussian_deriv,int k_width,float sigma) { int i; float sum=0; int a=k_width/2; *gaussian_kernel=(float *)malloc(sizeof(float)*k_width); *gaussian_deriv=(float *)malloc(sizeof(float)*k_width); //Create kernel sum=0; for(i=0;i<k_width;i++) { (*gaussian_kernel)[i]=exp((-1*(i-a)*(i-a))/(2*sigma*sigma)); sum+=(*gaussian_kernel)[i]; } for(i=0;i<k_width;i++) (*gaussian_kernel)[i]/=sum; //Create derivative sum=0; for(i=0;i<k_width;i++) { (*gaussian_deriv)[i]=-1*(i-a)*exp((-1*(i-a)*(i-a))/(2*sigma*sigma)); sum-=i*((*gaussian_deriv)[i]); } for(i=0;i<k_width;i++) (*gaussian_deriv)[i]/=sum; } __global__ void convolve_v(float *in_image, int width, int height, float *mask, int mask_width, float *out_image) { extern __shared__ float Ashared[]; float sum; int offsetj; int i=blockIdx.x*blockDim.x+threadIdx.x; int j=blockIdx.y*blockDim.y+threadIdx.y; Ashared[threadIdx.x*blockDim.x+threadIdx.y] = in_image[i*width+j]; __syncthreads(); sum=0; for(int k=0;k<mask_width;k++) { offsetj = (-1 * floor((double)(mask_width/2)) + k); if(threadIdx.y+offsetj>=0 && threadIdx.y+offsetj<blockDim.y) { sum+=(float)Ashared[threadIdx.x*blockDim.x+threadIdx.y+offsetj]*mask[k]; } else //have faith that global accesses core cache { if(j+offsetj>=0 && j+offsetj<width) { sum+= in_image[i * width + j + offsetj] * mask[k]; } } } out_image[i*width+j]=sum; } __global__ void convolve_h(float *in_image, int width, int height, float *mask, int mask_width, float *out_image) { extern __shared__ float Ashared[]; float sum; int offseti; int i=blockIdx.x*blockDim.x+threadIdx.x; int j=blockIdx.y*blockDim.y+threadIdx.y; Ashared[threadIdx.x*blockDim.x+threadIdx.y] = (float)in_image[i*width+j]; __syncthreads(); sum=0; for(int k=0;k<mask_width;k++) { offseti = (-1 * floor((double)(mask_width/2)) + k); if(threadIdx.x+offseti>=0 && threadIdx.x+offseti<blockDim.x) { sum+=(float)Ashared[(threadIdx.x + offseti) * blockDim.x +threadIdx.y]*mask[k]; } else //have faith that global accesses cache { if(i+offseti>=0 && i+offseti<height) //Have faith in L2 but make sure data is there! 
sum+= in_image[(i + offseti) * width + j] * mask[k]; } } __syncthreads(); out_image[i*width+j]=(float)sum; } int main(int argc, char **argv) { //Declare all of the variable here float *org_img; //GPU device buffer for original image float *d_org_img; //CPU host buffers for the final output float *vertical_gradient, *horizontal_gradient; //GPU buffers for the final result float *d_vertical_gradient, *d_horizontal_gradient; //GPU buffers to hold intermediate convolution results float *d_temp_horizontal, *d_temp_vertical; //CPU host buffers to store the convolution masks float *gaussian_kernel, *gaussian_deriv; //GPU device buffers to store the convolution masks float *d_gaussian_kernel, *d_gaussian_deriv; int width,height,k_width; float sigma,a; struct timeval start,end; if(argc!=3) { printf("\n The correct argument list is: exec <image file> <Sigma> \n"); exit(0); } //obtain the parameters sigma=atof(argv[2]); a=ceil((float)(2.5*sigma-0.5)); k_width=2*a+1; //CPU portion of the code that reads/prepares the input data read_image_template<float>(argv[1],&org_img,&width,&height); //Computation starts here gettimeofday(&start,NULL); create_gaussians(&gaussian_kernel,&gaussian_deriv,k_width,sigma); horizontal_gradient=(float *)malloc(sizeof(float)*width*height); vertical_gradient=(float *)malloc(sizeof(float)*width*height); // CPU host mallocs for GPU buffers hipMalloc((void **)&d_org_img,sizeof(float)*width*height); hipMalloc((void **)&d_temp_horizontal,sizeof(float)*width*height); hipMalloc((void **)&d_temp_vertical,sizeof(float)*width*height); hipMalloc((void **)&d_horizontal_gradient,sizeof(float)*width*height); hipMalloc((void **)&d_vertical_gradient,sizeof(float)*width*height); hipMalloc((void **)&d_gaussian_kernel,sizeof(float)*k_width); hipMalloc((void **)&d_gaussian_deriv,sizeof(float)*k_width); //Offload all of the data to GPU device for convolution hipMemcpy(d_org_img,org_img,sizeof(float)*width*height,hipMemcpyHostToDevice); hipMemcpy(d_gaussian_kernel,gaussian_kernel,sizeof(float)*k_width,hipMemcpyHostToDevice); hipMemcpy(d_gaussian_deriv,gaussian_deriv,sizeof(float)*k_width,hipMemcpyHostToDevice); //Horizontal gradient. vertical kernel then horizontal derivative int block_dim=16; dim3 dimGrid(ceil(height/block_dim),ceil(width/block_dim),1); dim3 dimBlock(block_dim,block_dim,1); hipLaunchKernelGGL(( convolve_v), dim3(dimGrid),dim3(dimBlock), sizeof(float)*T*T, 0, d_org_img,width,height,d_gaussian_kernel,k_width,d_temp_horizontal); hipLaunchKernelGGL(( convolve_h), dim3(dimGrid),dim3(dimBlock), sizeof(float)*T*T, 0, d_temp_horizontal,width,height,d_gaussian_deriv,k_width,d_horizontal_gradient); //Vertical gradient. 
horizontal kernel then vertical derivative hipLaunchKernelGGL(( convolve_h), dim3(dimGrid),dim3(dimBlock), sizeof(float)*T*T, 0, d_org_img,width,height,d_gaussian_kernel,k_width,d_temp_vertical); hipLaunchKernelGGL(( convolve_v), dim3(dimGrid),dim3(dimBlock), sizeof(float)*T*T, 0, d_temp_vertical,width,height,d_gaussian_deriv,k_width,d_vertical_gradient); //GPU to Host transfer of the final result hipMemcpy(horizontal_gradient,d_horizontal_gradient,sizeof(float)*width*height,hipMemcpyDeviceToHost); hipMemcpy(vertical_gradient,d_vertical_gradient,sizeof(float)*width*height,hipMemcpyDeviceToHost); write_image_template<float>((char *)("horizontal_gradient.pgm"),horizontal_gradient,width,height); write_image_template<float>((char *)("vertical_gradient.pgm"),vertical_gradient,width,height); hipDeviceSynchronize(); gettimeofday(&end,NULL); printf("%d, %ld\n", width, ((end.tv_sec * 1000 + end.tv_usec/1000) - (start.tv_sec * 1000 + start.tv_usec/1000))); //free variables free(org_img); free(horizontal_gradient); free(vertical_gradient); free(gaussian_kernel); free(gaussian_deriv); hipFree(d_org_img); hipFree(d_gaussian_kernel); hipFree(d_gaussian_deriv); hipFree(d_temp_horizontal); hipFree(d_temp_vertical); hipFree(d_vertical_gradient); hipFree(d_horizontal_gradient); return 0; }
331628e0fb44211a2c7e3bd7421790382bc96304.cu
#include <cuda.h> #include <stdio.h> #include <stdlib.h> #include "image_template.h" #include <math.h> #include <sys/time.h> #define T 16 //A consolidated function that creates both the Gaussian kernel and the derivative kernel void create_gaussians(float **gaussian_kernel,float **gaussian_deriv,int k_width,float sigma) { int i; float sum=0; int a=k_width/2; *gaussian_kernel=(float *)malloc(sizeof(float)*k_width); *gaussian_deriv=(float *)malloc(sizeof(float)*k_width); //Create kernel sum=0; for(i=0;i<k_width;i++) { (*gaussian_kernel)[i]=exp((-1*(i-a)*(i-a))/(2*sigma*sigma)); sum+=(*gaussian_kernel)[i]; } for(i=0;i<k_width;i++) (*gaussian_kernel)[i]/=sum; //Create derivative sum=0; for(i=0;i<k_width;i++) { (*gaussian_deriv)[i]=-1*(i-a)*exp((-1*(i-a)*(i-a))/(2*sigma*sigma)); sum-=i*((*gaussian_deriv)[i]); } for(i=0;i<k_width;i++) (*gaussian_deriv)[i]/=sum; } __global__ void convolve_v(float *in_image, int width, int height, float *mask, int mask_width, float *out_image) { extern __shared__ float Ashared[]; float sum; int offsetj; int i=blockIdx.x*blockDim.x+threadIdx.x; int j=blockIdx.y*blockDim.y+threadIdx.y; Ashared[threadIdx.x*blockDim.x+threadIdx.y] = in_image[i*width+j]; __syncthreads(); sum=0; for(int k=0;k<mask_width;k++) { offsetj = (-1 * floor((double)(mask_width/2)) + k); if(threadIdx.y+offsetj>=0 && threadIdx.y+offsetj<blockDim.y) { sum+=(float)Ashared[threadIdx.x*blockDim.x+threadIdx.y+offsetj]*mask[k]; } else //have faith that global accesses core cache { if(j+offsetj>=0 && j+offsetj<width) { sum+= in_image[i * width + j + offsetj] * mask[k]; } } } out_image[i*width+j]=sum; } __global__ void convolve_h(float *in_image, int width, int height, float *mask, int mask_width, float *out_image) { extern __shared__ float Ashared[]; float sum; int offseti; int i=blockIdx.x*blockDim.x+threadIdx.x; int j=blockIdx.y*blockDim.y+threadIdx.y; Ashared[threadIdx.x*blockDim.x+threadIdx.y] = (float)in_image[i*width+j]; __syncthreads(); sum=0; for(int k=0;k<mask_width;k++) { offseti = (-1 * floor((double)(mask_width/2)) + k); if(threadIdx.x+offseti>=0 && threadIdx.x+offseti<blockDim.x) { sum+=(float)Ashared[(threadIdx.x + offseti) * blockDim.x +threadIdx.y]*mask[k]; } else //have faith that global accesses cache { if(i+offseti>=0 && i+offseti<height) //Have faith in L2 but make sure data is there! 
sum+= in_image[(i + offseti) * width + j] * mask[k]; } } __syncthreads(); out_image[i*width+j]=(float)sum; } int main(int argc, char **argv) { //Declare all of the variable here float *org_img; //GPU device buffer for original image float *d_org_img; //CPU host buffers for the final output float *vertical_gradient, *horizontal_gradient; //GPU buffers for the final result float *d_vertical_gradient, *d_horizontal_gradient; //GPU buffers to hold intermediate convolution results float *d_temp_horizontal, *d_temp_vertical; //CPU host buffers to store the convolution masks float *gaussian_kernel, *gaussian_deriv; //GPU device buffers to store the convolution masks float *d_gaussian_kernel, *d_gaussian_deriv; int width,height,k_width; float sigma,a; struct timeval start,end; if(argc!=3) { printf("\n The correct argument list is: exec <image file> <Sigma> \n"); exit(0); } //obtain the parameters sigma=atof(argv[2]); a=ceil((float)(2.5*sigma-0.5)); k_width=2*a+1; //CPU portion of the code that reads/prepares the input data read_image_template<float>(argv[1],&org_img,&width,&height); //Computation starts here gettimeofday(&start,NULL); create_gaussians(&gaussian_kernel,&gaussian_deriv,k_width,sigma); horizontal_gradient=(float *)malloc(sizeof(float)*width*height); vertical_gradient=(float *)malloc(sizeof(float)*width*height); // CPU host mallocs for GPU buffers cudaMalloc((void **)&d_org_img,sizeof(float)*width*height); cudaMalloc((void **)&d_temp_horizontal,sizeof(float)*width*height); cudaMalloc((void **)&d_temp_vertical,sizeof(float)*width*height); cudaMalloc((void **)&d_horizontal_gradient,sizeof(float)*width*height); cudaMalloc((void **)&d_vertical_gradient,sizeof(float)*width*height); cudaMalloc((void **)&d_gaussian_kernel,sizeof(float)*k_width); cudaMalloc((void **)&d_gaussian_deriv,sizeof(float)*k_width); //Offload all of the data to GPU device for convolution cudaMemcpy(d_org_img,org_img,sizeof(float)*width*height,cudaMemcpyHostToDevice); cudaMemcpy(d_gaussian_kernel,gaussian_kernel,sizeof(float)*k_width,cudaMemcpyHostToDevice); cudaMemcpy(d_gaussian_deriv,gaussian_deriv,sizeof(float)*k_width,cudaMemcpyHostToDevice); //Horizontal gradient. vertical kernel then horizontal derivative int block_dim=16; dim3 dimGrid(ceil(height/block_dim),ceil(width/block_dim),1); dim3 dimBlock(block_dim,block_dim,1); convolve_v<<<dimGrid,dimBlock, sizeof(float)*T*T>>>(d_org_img,width,height,d_gaussian_kernel,k_width,d_temp_horizontal); convolve_h<<<dimGrid,dimBlock, sizeof(float)*T*T>>>(d_temp_horizontal,width,height,d_gaussian_deriv,k_width,d_horizontal_gradient); //Vertical gradient. 
horizontal kernel then vertical derivative convolve_h<<<dimGrid,dimBlock, sizeof(float)*T*T>>>(d_org_img,width,height,d_gaussian_kernel,k_width,d_temp_vertical); convolve_v<<<dimGrid,dimBlock, sizeof(float)*T*T>>>(d_temp_vertical,width,height,d_gaussian_deriv,k_width,d_vertical_gradient); //GPU to Host transfer of the final result cudaMemcpy(horizontal_gradient,d_horizontal_gradient,sizeof(float)*width*height,cudaMemcpyDeviceToHost); cudaMemcpy(vertical_gradient,d_vertical_gradient,sizeof(float)*width*height,cudaMemcpyDeviceToHost); write_image_template<float>((char *)("horizontal_gradient.pgm"),horizontal_gradient,width,height); write_image_template<float>((char *)("vertical_gradient.pgm"),vertical_gradient,width,height); cudaThreadSynchronize(); gettimeofday(&end,NULL); printf("%d, %ld\n", width, ((end.tv_sec * 1000 + end.tv_usec/1000) - (start.tv_sec * 1000 + start.tv_usec/1000))); //free variables free(org_img); free(horizontal_gradient); free(vertical_gradient); free(gaussian_kernel); free(gaussian_deriv); cudaFree(d_org_img); cudaFree(d_gaussian_kernel); cudaFree(d_gaussian_deriv); cudaFree(d_temp_horizontal); cudaFree(d_temp_vertical); cudaFree(d_vertical_gradient); cudaFree(d_horizontal_gradient); return 0; }
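The .hip/.cu pair above differs only in API spelling: cudaMalloc/cudaMemcpy/cudaFree become hipMalloc/hipMemcpy/hipFree, each kernel<<<grid, block, shmem>>>(...) launch becomes hipLaunchKernelGGL(kernel, grid, block, shmem, stream, ...), and the deprecated cudaThreadSynchronize() is mapped to hipDeviceSynchronize(). The sketch below is a minimal, self-contained illustration of that mapping; the scale kernel and every name in it are hypothetical and not taken from the files above.

#include <hip/hip_runtime.h>

// Hypothetical kernel, used only to illustrate the CUDA -> HIP launch mapping.
__global__ void scale(float *data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= factor;
}

int main(void)
{
    const int n = 1 << 20;
    float *d_data = NULL;

    hipMalloc((void **)&d_data, sizeof(float) * n);   // was cudaMalloc
    hipMemset(d_data, 0, sizeof(float) * n);

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // CUDA: scale<<<grid, block, 0, 0>>>(d_data, 2.0f, n);
    // HIP : the same launch written with hipLaunchKernelGGL
    hipLaunchKernelGGL(scale, grid, block, 0, 0, d_data, 2.0f, n);

    hipDeviceSynchronize();                           // was cudaThreadSynchronize
    hipFree(d_data);                                  // was cudaFree
    return 0;
}

Unlike the gradient programs above, the grid here is rounded up with integer arithmetic; computing ceil(height/block_dim) on an already-truncated integer quotient, as those programs do, covers the whole image only when width and height are multiples of the 16-wide block.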
ce3f1f13e42642182d95b5ef85dad8514f6a87e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> // includes, project #include <cutil.h> // includes, kernels #include <scan_naive_kernel.cu> #include <scan_workefficient_kernel.cu> #include <scan_best_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); // regression test functionality extern "C" unsigned int compare( const float* reference, const float* data, const unsigned int len); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { CUT_DEVICE_INIT(); int num_elements = 512; cutGetCmdLineArgumenti( argc, (const char**) argv, "n", &num_elements); unsigned int timer; CUT_SAFE_CALL( cutCreateTimer(&timer)); const unsigned int num_threads = num_elements / 2; const unsigned int mem_size = sizeof( float) * num_elements; // padding space is used to avoid shared memory bank conflicts unsigned int extra_space = num_elements / NUM_BANKS; #ifdef ZERO_BANK_CONFLICTS extra_space += extra_space / NUM_BANKS; #endif const unsigned int shared_mem_size = sizeof(float) * (num_elements + extra_space); // allocate host memory to store the input data float* h_data = (float*) malloc( mem_size); // initialize the input data on the host to be integer values // between 0 and 1000 for( unsigned int i = 0; i < num_elements; ++i) { h_data[i] = floorf(1000*(rand()/(float)RAND_MAX)); } // compute reference solution float* reference = (float*) malloc( mem_size); computeGold( reference, h_data, num_elements); // allocate device memory input and output arrays float* d_idata; float* d_odata[3]; CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, mem_size)); CUDA_SAFE_CALL( hipMalloc( (void**) &(d_odata[0]), mem_size)); CUDA_SAFE_CALL( hipMalloc( (void**) &(d_odata[1]), mem_size)); CUDA_SAFE_CALL( hipMalloc( (void**) &(d_odata[2]), mem_size)); // copy host memory to device input array CUDA_SAFE_CALL( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) ); // setup execution parameters // Note that these scans only support a single thread-block worth of data, // but we invoke them here on many blocks so that we can accurately compare // performance #ifndef __DEVICE_EMULATION__ dim3 grid(256, 1, 1); #else dim3 grid(1, 1, 1); // only one run block in device emu mode or it will be too slow #endif dim3 threads(num_threads*2, 1, 1); // make sure there are no CUDA errors before we start CUT_CHECK_ERROR("Kernel execution failed"); printf("Running parallel prefix sum (scan) of %d elements\n", num_elements); printf("Comparing 3 versions:\n\n"); // execute the kernels unsigned int numIterations = 100; printf("1. scan_naive -- not work efficient (O(n log n) adds).\n"); cutStartTimer(timer); for (int i = 0; i < numIterations; ++i) { hipLaunchKernelGGL(( scan_naive), dim3(grid), dim3(threads), 2 * shared_mem_size , 0, d_odata[0], d_idata, num_elements); } hipDeviceSynchronize(); cutStopTimer(timer); printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations); cutResetTimer(timer); threads.x /= 2; printf("2. scan_workefficient -- Work efficient (O(n) adds), but many bank conflicts.\n"); cutStartTimer(timer); for (int i = 0; i < numIterations; ++i) { hipLaunchKernelGGL(( scan_workefficient), dim3(grid), dim3(threads), shared_mem_size , 0, d_odata[1], d_idata, num_elements); } hipDeviceSynchronize(); cutStopTimer(timer); printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations); cutResetTimer(timer); printf("3. 
scan_best -- work efficient with very few bank conflicts.\n"); cutStartTimer(timer); for (int i = 0; i < numIterations; ++i) { hipLaunchKernelGGL(( scan_best), dim3(grid), dim3(threads), shared_mem_size , 0, d_odata[2], d_idata, num_elements); } hipDeviceSynchronize(); cutStopTimer(timer); printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations); cutResetTimer(timer); // check for any errors CUT_CHECK_ERROR("Kernel execution failed"); for (int i = 0; i < 3; ++i) // check all 3 results { // copy result from device to host CUDA_SAFE_CALL(hipMemcpy( h_data, d_odata[i], sizeof(float) * num_elements, hipMemcpyDeviceToHost)); // If this is a regression test write the results to a file if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression")) { // write file for regression test cutWriteFilef( "./data/result.dat", h_data, num_elements, 0.0); } else { // custom output handling when no regression test running // in this case check if the result is equivalent to the expected soluion // We can use an epsilon of 0 since values are integral and in a range // that can be exactly represented float epsilon = 0.0f; unsigned int result_regtest = cutComparefe( reference, h_data, num_elements, epsilon); char* names[] = {"scan_naive", "scan_workefficient", "scan_best"}; printf( "%s: Test %s\n", names[i], (1 == result_regtest) ? "PASSED" : "FAILED"); } } // cleanup memory free( h_data); free( reference); CUDA_SAFE_CALL(hipFree(d_idata)); CUDA_SAFE_CALL(hipFree(d_odata[0])); CUDA_SAFE_CALL(hipFree(d_odata[1])); CUDA_SAFE_CALL(hipFree(d_odata[2])); CUT_SAFE_CALL(cutDeleteTimer(timer)); }
ce3f1f13e42642182d95b5ef85dad8514f6a87e9.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> // includes, project #include <cutil.h> // includes, kernels #include <scan_naive_kernel.cu> #include <scan_workefficient_kernel.cu> #include <scan_best_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); // regression test functionality extern "C" unsigned int compare( const float* reference, const float* data, const unsigned int len); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { CUT_DEVICE_INIT(); int num_elements = 512; cutGetCmdLineArgumenti( argc, (const char**) argv, "n", &num_elements); unsigned int timer; CUT_SAFE_CALL( cutCreateTimer(&timer)); const unsigned int num_threads = num_elements / 2; const unsigned int mem_size = sizeof( float) * num_elements; // padding space is used to avoid shared memory bank conflicts unsigned int extra_space = num_elements / NUM_BANKS; #ifdef ZERO_BANK_CONFLICTS extra_space += extra_space / NUM_BANKS; #endif const unsigned int shared_mem_size = sizeof(float) * (num_elements + extra_space); // allocate host memory to store the input data float* h_data = (float*) malloc( mem_size); // initialize the input data on the host to be integer values // between 0 and 1000 for( unsigned int i = 0; i < num_elements; ++i) { h_data[i] = floorf(1000*(rand()/(float)RAND_MAX)); } // compute reference solution float* reference = (float*) malloc( mem_size); computeGold( reference, h_data, num_elements); // allocate device memory input and output arrays float* d_idata; float* d_odata[3]; CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, mem_size)); CUDA_SAFE_CALL( cudaMalloc( (void**) &(d_odata[0]), mem_size)); CUDA_SAFE_CALL( cudaMalloc( (void**) &(d_odata[1]), mem_size)); CUDA_SAFE_CALL( cudaMalloc( (void**) &(d_odata[2]), mem_size)); // copy host memory to device input array CUDA_SAFE_CALL( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) ); // setup execution parameters // Note that these scans only support a single thread-block worth of data, // but we invoke them here on many blocks so that we can accurately compare // performance #ifndef __DEVICE_EMULATION__ dim3 grid(256, 1, 1); #else dim3 grid(1, 1, 1); // only one run block in device emu mode or it will be too slow #endif dim3 threads(num_threads*2, 1, 1); // make sure there are no CUDA errors before we start CUT_CHECK_ERROR("Kernel execution failed"); printf("Running parallel prefix sum (scan) of %d elements\n", num_elements); printf("Comparing 3 versions:\n\n"); // execute the kernels unsigned int numIterations = 100; printf("1. scan_naive -- not work efficient (O(n log n) adds).\n"); cutStartTimer(timer); for (int i = 0; i < numIterations; ++i) { scan_naive<<< grid, threads, 2 * shared_mem_size >>> (d_odata[0], d_idata, num_elements); } cudaThreadSynchronize(); cutStopTimer(timer); printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations); cutResetTimer(timer); threads.x /= 2; printf("2. scan_workefficient -- Work efficient (O(n) adds), but many bank conflicts.\n"); cutStartTimer(timer); for (int i = 0; i < numIterations; ++i) { scan_workefficient<<< grid, threads, shared_mem_size >>> (d_odata[1], d_idata, num_elements); } cudaThreadSynchronize(); cutStopTimer(timer); printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations); cutResetTimer(timer); printf("3. 
scan_best -- work efficient with very few bank conflicts.\n"); cutStartTimer(timer); for (int i = 0; i < numIterations; ++i) { scan_best<<< grid, threads, shared_mem_size >>> (d_odata[2], d_idata, num_elements); } cudaThreadSynchronize(); cutStopTimer(timer); printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations); cutResetTimer(timer); // check for any errors CUT_CHECK_ERROR("Kernel execution failed"); for (int i = 0; i < 3; ++i) // check all 3 results { // copy result from device to host CUDA_SAFE_CALL(cudaMemcpy( h_data, d_odata[i], sizeof(float) * num_elements, cudaMemcpyDeviceToHost)); // If this is a regression test write the results to a file if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression")) { // write file for regression test cutWriteFilef( "./data/result.dat", h_data, num_elements, 0.0); } else { // custom output handling when no regression test running // in this case check if the result is equivalent to the expected soluion // We can use an epsilon of 0 since values are integral and in a range // that can be exactly represented float epsilon = 0.0f; unsigned int result_regtest = cutComparefe( reference, h_data, num_elements, epsilon); char* names[] = {"scan_naive", "scan_workefficient", "scan_best"}; printf( "%s: Test %s\n", names[i], (1 == result_regtest) ? "PASSED" : "FAILED"); } } // cleanup memory free( h_data); free( reference); CUDA_SAFE_CALL(cudaFree(d_idata)); CUDA_SAFE_CALL(cudaFree(d_odata[0])); CUDA_SAFE_CALL(cudaFree(d_odata[1])); CUDA_SAFE_CALL(cudaFree(d_odata[2])); CUT_SAFE_CALL(cutDeleteTimer(timer)); }
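Both versions of the scan test lean on the old SDK cutil helpers (CUDA_SAFE_CALL, CUT_CHECK_ERROR, cutCreateTimer) for error checking and timing. The sketch below shows the same check-and-time structure written against the plain runtime API instead; the CHECK macro, the dummy_copy kernel, and the event-based timing are assumptions of this sketch, not code from the file above.

#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

// Minimal stand-in for the cutil CUDA_SAFE_CALL / CUT_CHECK_ERROR macros.
#define CHECK(call)                                                     \
    do {                                                                \
        hipError_t err_ = (call);                                       \
        if (err_ != hipSuccess) {                                       \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,          \
                    hipGetErrorString(err_));                           \
            exit(EXIT_FAILURE);                                         \
        }                                                               \
    } while (0)

// Hypothetical kernel standing in for scan_naive / scan_workefficient / scan_best.
__global__ void dummy_copy(float *out, const float *in, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i];
}

int main(void)
{
    const int n = 512;
    float *d_in = NULL, *d_out = NULL;
    CHECK(hipMalloc((void **)&d_in,  n * sizeof(float)));
    CHECK(hipMalloc((void **)&d_out, n * sizeof(float)));

    hipEvent_t start, stop;
    CHECK(hipEventCreate(&start));
    CHECK(hipEventCreate(&stop));

    const int iters = 100;
    dim3 block(256), grid((n + 255) / 256);

    CHECK(hipEventRecord(start, 0));
    for (int i = 0; i < iters; ++i)
        hipLaunchKernelGGL(dummy_copy, grid, block, 0, 0, d_out, d_in, n);
    CHECK(hipEventRecord(stop, 0));
    CHECK(hipEventSynchronize(stop));

    float ms = 0.0f;
    CHECK(hipEventElapsedTime(&ms, start, stop));
    printf("Average time: %f ms\n", ms / iters);

    CHECK(hipEventDestroy(start));
    CHECK(hipEventDestroy(stop));
    CHECK(hipFree(d_in));
    CHECK(hipFree(d_out));
    return 0;
}

Events measure elapsed GPU time directly, which avoids the host-side synchronize-then-stop-timer pairing used in the test above.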
d160c8f1219f501b27f054aebf4e01e41829edde.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CUAPI.h" #include "CUPOT.h" #if ( defined GPU && defined GRAVITY ) // Poisson solver prototypes #if ( POT_SCHEME == SOR ) #ifdef USE_PSOLVER_10TO14 __global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #else __global__ void CUPOT_PoissonSolver_SOR_16to18cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #endif // #ifdef USE_PSOLVER_10TO14 ... else ... #elif ( POT_SCHEME == MG ) __global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const real dh_Min, const int Max_Iter, const int NPre_Smooth, const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme ); #endif // POT_SCHEME // Gravity solver prototypes #if ( MODEL == HYDRO ) __global__ void CUPOT_HydroGravitySolver( real g_Flu_Array_New[][GRA_NIN][ CUBE(PS1) ], const real g_Pot_Array_New[][ CUBE(GRA_NXT) ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_G) ], const real g_Flu_Array_USG[][GRA_NIN-1][ CUBE(PS1) ], char g_DE_Array [][ CUBE(PS1) ], const real g_Emag_Array [][ CUBE(PS1) ], const real dt, const real dh, const bool P5_Gradient, const OptGravityType_t GravityType, ExtAcc_t ExtAcc_Func, const double TimeNew, const double TimeOld, const real MinEint ); #elif ( MODEL == ELBDM ) __global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ PS1*PS1*PS1 ], const real g_Pot_Array[][ GRA_NXT*GRA_NXT*GRA_NXT ], const double g_Corner_Array[][3], const real EtaDt, const real dh, const real Lambda, const bool ExtPot, ExtPot_t ExtPot_Func, const double TimeNew ); #else #error : ERROR : unsupported MODEL !! 
#endif // MODEL // declare all device pointers extern real (*d_Rho_Array_P )[ CUBE(RHO_NXT) ]; extern real (*d_Pot_Array_P_In )[ CUBE(POT_NXT) ]; extern real (*d_Pot_Array_P_Out)[ CUBE(GRA_NXT) ]; extern real (*d_Flu_Array_G )[GRA_NIN][ CUBE(PS1)]; extern double (*d_Corner_Array_G)[3]; #if ( MODEL == HYDRO ) #ifdef UNSPLIT_GRAVITY extern real (*d_Pot_Array_USG_G)[ CUBE(USG_NXT_G) ]; extern real (*d_Flu_Array_USG_G)[GRA_NIN-1][ CUBE(PS1) ]; #else static real (*d_Pot_Array_USG_G)[ CUBE(USG_NXT_G) ] = NULL; static real (*d_Flu_Array_USG_G)[GRA_NIN-1][ CUBE(PS1) ] = NULL; #endif #ifdef DUAL_ENERGY extern char (*d_DE_Array_G)[ CUBE(PS1) ]; #else static char (*d_DE_Array_G)[ CUBE(PS1) ] = NULL; #endif #ifdef MHD extern real (*d_Emag_Array_G)[ CUBE(PS1) ]; #else static real (*d_Emag_Array_G)[ CUBE(PS1) ] = NULL; #endif #endif // #if ( MODEL == HYDRO ) extern hipStream_t *Stream; //------------------------------------------------------------------------------------------------------- // Function : CUAPI_Asyn_PoissonGravitySolver // Description : Invoke the CUPOT_PoissonSolver_XXtoXXcube and/or CUPOT_GravitySolver kernel(s) to evaluate // the gravitational potential and/or advance the fluid variables by the gravitational // acceleration for a group of patches // // *********************************************************** // ** Asynchronous Function ** // ** ** // ** will return before the execution in GPU is complete ** // *********************************************************** // // Note : a. Use streams for the asychronous memory copy between device and host // b. Prefix "d" : for pointers pointing to the "Device" memory space // Prefix "h" : for pointers pointing to the "Host" memory space // // Parameter : h_Rho_Array : Host array storing the input density // h_Pot_Array_In : Host array storing the input "coarse-grid" potential for interpolation // h_Pot_Array_Out : Host array to store the output potential // h_Flu_Array : Host array to store the fluid variables for the Gravity solver // h_Corner_Array : Host array storing the physical corner coordinates of each patch // h_Pot_Array_USG : Host array storing the prepared potential for UNSPLIT_GRAVITY // h_Flu_Array_USG : Host array storing the prepared density + momentum for UNSPLIT_GRAVITY // h_DE_Array : Host array storing the dual-energy status (for both input and output) // h_Emag_Array : Host array storing the cell-centered magnetic energy (MHD only) // NPatchGroup : Number of patch groups evaluated simultaneously by GPU // dt : Time interval to advance solution // dh : Grid size // SOR_Min_Iter : Minimum # of iterations for SOR // SOR_Max_Iter : Maximum # of iterations for SOR // SOR_Omega : Over-relaxation parameter // MG_Max_Iter : Maximum number of iterations for multigrid // MG_NPre_Smooth : Number of pre-smoothing steps for multigrid // MG_NPos_tSmooth : Number of post-smoothing steps for multigrid // MG_Tolerated_Error : Maximum tolerated error for multigrid // Poi_Coeff : Coefficient in front of density in the Poisson equation (4*Pi*Newton_G*a) // IntScheme : Interpolation scheme for potential // --> currently supported schemes include // INT_CQUAD : conservative quadratic interpolation // INT_QUAD : quadratic interpolation // P5_Gradient : Use 5-points stencil to evaluate the potential gradient // ELBDM_Eta : Particle mass / Planck constant in ELBDM // ELBDM_Lambda : Quartic self-interaction coefficient in ELBDM // Poisson : true --> invoke the Poisson solver // GraAcc : true --> invoke the Gravity solver // GPU_NStream : 
Number of CUDA streams for the asynchronous memory copy // GravityType : Types of gravity --> self-gravity, external gravity, both // TimeNew : Physical time at the current step (for the external gravity solver) // TimeOld : Physical time at the previous step (for the external gravity solver in UNSPLIT_GRAVITY) // ExtPot : Add the external potential // MinEint : Internal energy floor // // Useless parameters in HYDRO : ELBDM_Eta, ELBDM_Lambda // Useless parameters in ELBDM : P5_Gradient //------------------------------------------------------------------------------------------------------- void CUAPI_Asyn_PoissonGravitySolver( const real h_Rho_Array [][RHO_NXT][RHO_NXT][RHO_NXT], const real h_Pot_Array_In [][POT_NXT][POT_NXT][POT_NXT], real h_Pot_Array_Out[][GRA_NXT][GRA_NXT][GRA_NXT], real h_Flu_Array [][GRA_NIN][PS1][PS1][PS1], const double h_Corner_Array[][3], const real h_Pot_Array_USG[][USG_NXT_G][USG_NXT_G][USG_NXT_G], const real h_Flu_Array_USG[][GRA_NIN-1][PS1][PS1][PS1], char h_DE_Array [][PS1][PS1][PS1], const real h_Emag_Array [][PS1][PS1][PS1], const int NPatchGroup, const real dt, const real dh, const int SOR_Min_Iter, const int SOR_Max_Iter, const real SOR_Omega, const int MG_Max_Iter, const int MG_NPre_Smooth, const int MG_NPost_Smooth, const real MG_Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme, const bool P5_Gradient, const real ELBDM_Eta, const real ELBDM_Lambda, const bool Poisson, const bool GraAcc, const int GPU_NStream, const OptGravityType_t GravityType, const double TimeNew, const double TimeOld, const bool ExtPot, const real MinEint ) { // model-independent constants # if ( POT_SCHEME == SOR ) const dim3 Poi_Block_Dim( RHO_NXT/2, RHO_NXT, POT_BLOCK_SIZE_Z ); # elif ( POT_SCHEME == MG ) const dim3 Poi_Block_Dim( POT_BLOCK_SIZE_X, 1, 1 ); # endif const dim3 Gra_Block_Dim( GRA_BLOCK_SIZE ); const int NPatch = NPatchGroup*8; # if ( POT_SCHEME == SOR ) const real Poi_Const = Poi_Coeff*dh*dh; const real SOR_Omega_6 = SOR_Omega/6.0; # endif // model-dependent constants # if ( MODEL == HYDRO ) # elif ( MODEL == ELBDM ) const real ELBDM_EtaDt = ELBDM_Eta*dt; # else # error : ERROR : unsupported MODEL !! # endif // check # if ( MODEL == ELBDM && !defined STORE_POT_GHOST && GRA_GHOST_SIZE != 0 ) # warning : WARNING : GRA_GHOST_SIZE != 0 in ELBDM (without STORE_POT_GHOST) !! 
# endif # ifdef GAMER_DEBUG const int Poi_NThread = Poi_Block_Dim.x * Poi_Block_Dim.y * Poi_Block_Dim.z; // minimum number of threads for spatial interpolation if ( Poisson && Poi_NThread < (POT_NXT-2)*(POT_NXT-2) ) Aux_Error( ERROR_INFO, "Poi_NThread (%d) < (POT_NXT-2)*(POT_NXT-2) (%d) !!\n", Poi_NThread, (POT_NXT-2)*(POT_NXT-2) ); // constraint due to the reduction operation in "CUPOT_Poisson_10to14cube" and "CUPOT_PoissonSolver_MG" # if ( ( POT_SCHEME == SOR && defined USE_PSOLVER_10TO14 ) || POT_SCHEME == MG ) if ( Poisson && Poi_NThread < 64 ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must >= 64) !!\n", "Poi_NThread", Poi_NThread ); # endif // constraint in "CUPOT_PoissonSolver_SOR_16to18cube" # if ( POT_SCHEME == SOR && !defined USE_PSOLVER_10TO14 ) if ( Poisson && Poi_NThread != RHO_NXT*RHO_NXT/2 ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must == %d) !!\n", "Poi_NThread", Poi_NThread, RHO_NXT*RHO_NXT/2 ); # endif if ( GraAcc ) { if ( GravityType == GRAVITY_EXTERNAL || GravityType == GRAVITY_BOTH || ExtPot ) { if ( h_Corner_Array == NULL ) Aux_Error( ERROR_INFO, "h_Corner_Array == NULL !!\n" ); if ( d_Corner_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_Corner_Array_G == NULL !!\n" ); } # ifdef UNSPLIT_GRAVITY if ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) { if ( h_Pot_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Pot_Array_USG == NULL !!\n" ); if ( d_Pot_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Pot_Array_USG_G == NULL !!\n" ); } if ( h_Flu_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Flu_Array_USG == NULL !!\n" ); if ( d_Flu_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Flu_Array_USG_G == NULL !!\n" ); # endif # ifdef DUAL_ENERGY if ( h_DE_Array == NULL ) Aux_Error( ERROR_INFO, "h_DE_Array == NULL !!\n" ); if ( d_DE_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_DE_Array_G == NULL !!\n" ); # endif # ifdef MHD if ( h_Emag_Array == NULL ) Aux_Error( ERROR_INFO, "h_Emag_Array == NULL !!\n" ); if ( d_Emag_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_Emag_Array_G == NULL !!\n" ); # endif } # endif // #ifdef GAMER_DEBUG if ( Poisson && ( IntScheme != INT_CQUAD && IntScheme != INT_QUAD ) ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "IntScheme", IntScheme ); int *NPatch_per_Stream = new int [GPU_NStream]; int *Rho_MemSize = new int [GPU_NStream]; int *Pot_MemSize_In = new int [GPU_NStream]; int *Pot_MemSize_Out = new int [GPU_NStream]; int *Flu_MemSize = new int [GPU_NStream]; int *Corner_MemSize = new int [GPU_NStream]; int *UsedPatch = new int [GPU_NStream]; # ifdef UNSPLIT_GRAVITY int *Pot_USG_MemSize = new int [GPU_NStream]; int *Flu_USG_MemSize = new int [GPU_NStream]; # endif # ifdef DUAL_ENERGY int *DE_MemSize = new int [GPU_NStream]; # endif # ifdef MHD int *Emag_MemSize = new int [GPU_NStream]; # endif // set the number of patches in each stream UsedPatch[0] = 0; if ( GPU_NStream == 1 ) NPatch_per_Stream[0] = NPatch; else { for (int s=0; s<GPU_NStream-1; s++) { NPatch_per_Stream[s] = NPatch/GPU_NStream; UsedPatch[s+1] = UsedPatch[s] + NPatch_per_Stream[s]; } NPatch_per_Stream[GPU_NStream-1] = NPatch - UsedPatch[GPU_NStream-1]; } // set the size of data to be transferred into GPU in each stream for (int s=0; s<GPU_NStream; s++) { Rho_MemSize [s] = NPatch_per_Stream[s]*CUBE(RHO_NXT )*sizeof(real); Pot_MemSize_In [s] = NPatch_per_Stream[s]*CUBE(POT_NXT )*sizeof(real); Pot_MemSize_Out[s] = NPatch_per_Stream[s]*CUBE(GRA_NXT )*sizeof(real); Flu_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real)*GRA_NIN; Corner_MemSize 
[s] = NPatch_per_Stream[s]*3 *sizeof(double); # ifdef UNSPLIT_GRAVITY Pot_USG_MemSize[s] = NPatch_per_Stream[s]*CUBE(USG_NXT_G)*sizeof(real); Flu_USG_MemSize[s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real)*(GRA_NIN-1); # endif # ifdef DUAL_ENERGY DE_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(char); # endif # ifdef MHD Emag_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real); # endif } // a. copy data from host to device //========================================================================================= for (int s=0; s<GPU_NStream; s++) { if ( NPatch_per_Stream[s] == 0 ) continue; if ( Poisson ) { CUDA_CHECK_ERROR( hipMemcpyAsync( d_Rho_Array_P + UsedPatch[s], h_Rho_Array + UsedPatch[s], Rho_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) ); CUDA_CHECK_ERROR( hipMemcpyAsync( d_Pot_Array_P_In + UsedPatch[s], h_Pot_Array_In + UsedPatch[s], Pot_MemSize_In[s], hipMemcpyHostToDevice, Stream[s] ) ); } if ( GraAcc ) { if ( ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) && !Poisson ) CUDA_CHECK_ERROR( hipMemcpyAsync( d_Pot_Array_P_Out + UsedPatch[s], h_Pot_Array_Out + UsedPatch[s], Pot_MemSize_Out[s], hipMemcpyHostToDevice, Stream[s] ) ); CUDA_CHECK_ERROR( hipMemcpyAsync( d_Flu_Array_G + UsedPatch[s], h_Flu_Array + UsedPatch[s], Flu_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) ); if ( GravityType == GRAVITY_EXTERNAL || GravityType == GRAVITY_BOTH || ExtPot ) CUDA_CHECK_ERROR( hipMemcpyAsync( d_Corner_Array_G + UsedPatch[s], h_Corner_Array + UsedPatch[s], Corner_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) ); # ifdef UNSPLIT_GRAVITY if ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) CUDA_CHECK_ERROR( hipMemcpyAsync( d_Pot_Array_USG_G + UsedPatch[s], h_Pot_Array_USG + UsedPatch[s], Pot_USG_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) ); CUDA_CHECK_ERROR( hipMemcpyAsync( d_Flu_Array_USG_G + UsedPatch[s], h_Flu_Array_USG + UsedPatch[s], Flu_USG_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) ); # endif # ifdef DUAL_ENERGY CUDA_CHECK_ERROR( hipMemcpyAsync( d_DE_Array_G + UsedPatch[s], h_DE_Array + UsedPatch[s], DE_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) ); # endif # ifdef MHD CUDA_CHECK_ERROR( hipMemcpyAsync( d_Emag_Array_G + UsedPatch[s], h_Emag_Array + UsedPatch[s], Emag_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) ); # endif } // if ( GraAcc ) } // for (int s=0; s<GPU_NStream; s++) // b. execute the kernel //========================================================================================= for (int s=0; s<GPU_NStream; s++) { if ( NPatch_per_Stream[s] == 0 ) continue; // b1. Poisson solver if ( Poisson ) { # if ( POT_SCHEME == SOR ) # ifdef USE_PSOLVER_10TO14 hipLaunchKernelGGL(( CUPOT_PoissonSolver_SOR_10to14cube) , dim3(NPatch_per_Stream[s]), dim3(Poi_Block_Dim), 0, Stream[s] , d_Rho_Array_P + UsedPatch[s], d_Pot_Array_P_In + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme ); # else hipLaunchKernelGGL(( CUPOT_PoissonSolver_SOR_16to18cube) , dim3(NPatch_per_Stream[s]), dim3(Poi_Block_Dim), 0, Stream[s] , d_Rho_Array_P + UsedPatch[s], d_Pot_Array_P_In + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme ); # endif // #ifdef USE_PSOLVER_10TO14 ... else ... 
# elif ( POT_SCHEME == MG ) hipLaunchKernelGGL(( CUPOT_PoissonSolver_MG) , dim3(NPatch_per_Stream[s]), dim3(Poi_Block_Dim), 0, Stream[s] , d_Rho_Array_P + UsedPatch[s], d_Pot_Array_P_In + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], dh, MG_Max_Iter, MG_NPre_Smooth, MG_NPost_Smooth, MG_Tolerated_Error, Poi_Coeff, IntScheme ); # else # error : unsupported GPU Poisson solver # endif // POT_SCHEME } // if ( Poisson ) // b2. Gravity solver if ( GraAcc ) { # if ( MODEL == HYDRO ) hipLaunchKernelGGL(( CUPOT_HydroGravitySolver) , dim3(NPatch_per_Stream[s]), dim3(Gra_Block_Dim), 0, Stream[s] , d_Flu_Array_G + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], d_Corner_Array_G + UsedPatch[s], d_Pot_Array_USG_G + UsedPatch[s], d_Flu_Array_USG_G + UsedPatch[s], d_DE_Array_G + UsedPatch[s], d_Emag_Array_G + UsedPatch[s], dt, dh, P5_Gradient, GravityType, GPUExtAcc_Ptr, TimeNew, TimeOld, MinEint ); # elif ( MODEL == ELBDM ) hipLaunchKernelGGL(( CUPOT_ELBDMGravitySolver) , dim3(NPatch_per_Stream[s]), dim3(Gra_Block_Dim), 0, Stream[s] , d_Flu_Array_G + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], d_Corner_Array_G + UsedPatch[s], ELBDM_EtaDt, dh, ELBDM_Lambda, ExtPot, GPUExtPot_Ptr, TimeNew ); # else # error : ERROR : unsupported MODEL !! # endif // MODEL } // if ( GraAcc ) CUDA_CHECK_ERROR( hipGetLastError() ); } // for (int s=0; s<GPU_NStream; s++) // c. copy data from device to host //========================================================================================= for (int s=0; s<GPU_NStream; s++) { if ( NPatch_per_Stream[s] == 0 ) continue; if ( Poisson ) CUDA_CHECK_ERROR( hipMemcpyAsync( h_Pot_Array_Out + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], Pot_MemSize_Out[s], hipMemcpyDeviceToHost, Stream[s] ) ); if ( GraAcc ) { CUDA_CHECK_ERROR( hipMemcpyAsync( h_Flu_Array + UsedPatch[s], d_Flu_Array_G + UsedPatch[s], Flu_MemSize[s], hipMemcpyDeviceToHost, Stream[s] ) ); # ifdef DUAL_ENERGY CUDA_CHECK_ERROR( hipMemcpyAsync( h_DE_Array + UsedPatch[s], d_DE_Array_G + UsedPatch[s], DE_MemSize[s], hipMemcpyDeviceToHost, Stream[s] ) ); # endif } } // for (int s=0; s<GPU_NStream; s++) delete [] NPatch_per_Stream; delete [] Rho_MemSize; delete [] Pot_MemSize_In; delete [] Pot_MemSize_Out; delete [] Flu_MemSize; delete [] Corner_MemSize; delete [] UsedPatch; # ifdef UNSPLIT_GRAVITY delete [] Pot_USG_MemSize; delete [] Flu_USG_MemSize; # endif # ifdef DUAL_ENERGY delete [] DE_MemSize; # endif # ifdef MHD delete [] Emag_MemSize; # endif } // FUNCTION : CUAPI_Asyn_PoissonGravitySolver #endif // #if ( defined GPU && defined GRAVITY )
d160c8f1219f501b27f054aebf4e01e41829edde.cu
#include "CUAPI.h" #include "CUPOT.h" #if ( defined GPU && defined GRAVITY ) // Poisson solver prototypes #if ( POT_SCHEME == SOR ) #ifdef USE_PSOLVER_10TO14 __global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #else __global__ void CUPOT_PoissonSolver_SOR_16to18cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #endif // #ifdef USE_PSOLVER_10TO14 ... else ... #elif ( POT_SCHEME == MG ) __global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const real dh_Min, const int Max_Iter, const int NPre_Smooth, const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme ); #endif // POT_SCHEME // Gravity solver prototypes #if ( MODEL == HYDRO ) __global__ void CUPOT_HydroGravitySolver( real g_Flu_Array_New[][GRA_NIN][ CUBE(PS1) ], const real g_Pot_Array_New[][ CUBE(GRA_NXT) ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_G) ], const real g_Flu_Array_USG[][GRA_NIN-1][ CUBE(PS1) ], char g_DE_Array [][ CUBE(PS1) ], const real g_Emag_Array [][ CUBE(PS1) ], const real dt, const real dh, const bool P5_Gradient, const OptGravityType_t GravityType, ExtAcc_t ExtAcc_Func, const double TimeNew, const double TimeOld, const real MinEint ); #elif ( MODEL == ELBDM ) __global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ PS1*PS1*PS1 ], const real g_Pot_Array[][ GRA_NXT*GRA_NXT*GRA_NXT ], const double g_Corner_Array[][3], const real EtaDt, const real dh, const real Lambda, const bool ExtPot, ExtPot_t ExtPot_Func, const double TimeNew ); #else #error : ERROR : unsupported MODEL !! 
#endif // MODEL // declare all device pointers extern real (*d_Rho_Array_P )[ CUBE(RHO_NXT) ]; extern real (*d_Pot_Array_P_In )[ CUBE(POT_NXT) ]; extern real (*d_Pot_Array_P_Out)[ CUBE(GRA_NXT) ]; extern real (*d_Flu_Array_G )[GRA_NIN][ CUBE(PS1)]; extern double (*d_Corner_Array_G)[3]; #if ( MODEL == HYDRO ) #ifdef UNSPLIT_GRAVITY extern real (*d_Pot_Array_USG_G)[ CUBE(USG_NXT_G) ]; extern real (*d_Flu_Array_USG_G)[GRA_NIN-1][ CUBE(PS1) ]; #else static real (*d_Pot_Array_USG_G)[ CUBE(USG_NXT_G) ] = NULL; static real (*d_Flu_Array_USG_G)[GRA_NIN-1][ CUBE(PS1) ] = NULL; #endif #ifdef DUAL_ENERGY extern char (*d_DE_Array_G)[ CUBE(PS1) ]; #else static char (*d_DE_Array_G)[ CUBE(PS1) ] = NULL; #endif #ifdef MHD extern real (*d_Emag_Array_G)[ CUBE(PS1) ]; #else static real (*d_Emag_Array_G)[ CUBE(PS1) ] = NULL; #endif #endif // #if ( MODEL == HYDRO ) extern cudaStream_t *Stream; //------------------------------------------------------------------------------------------------------- // Function : CUAPI_Asyn_PoissonGravitySolver // Description : Invoke the CUPOT_PoissonSolver_XXtoXXcube and/or CUPOT_GravitySolver kernel(s) to evaluate // the gravitational potential and/or advance the fluid variables by the gravitational // acceleration for a group of patches // // *********************************************************** // ** Asynchronous Function ** // ** ** // ** will return before the execution in GPU is complete ** // *********************************************************** // // Note : a. Use streams for the asychronous memory copy between device and host // b. Prefix "d" : for pointers pointing to the "Device" memory space // Prefix "h" : for pointers pointing to the "Host" memory space // // Parameter : h_Rho_Array : Host array storing the input density // h_Pot_Array_In : Host array storing the input "coarse-grid" potential for interpolation // h_Pot_Array_Out : Host array to store the output potential // h_Flu_Array : Host array to store the fluid variables for the Gravity solver // h_Corner_Array : Host array storing the physical corner coordinates of each patch // h_Pot_Array_USG : Host array storing the prepared potential for UNSPLIT_GRAVITY // h_Flu_Array_USG : Host array storing the prepared density + momentum for UNSPLIT_GRAVITY // h_DE_Array : Host array storing the dual-energy status (for both input and output) // h_Emag_Array : Host array storing the cell-centered magnetic energy (MHD only) // NPatchGroup : Number of patch groups evaluated simultaneously by GPU // dt : Time interval to advance solution // dh : Grid size // SOR_Min_Iter : Minimum # of iterations for SOR // SOR_Max_Iter : Maximum # of iterations for SOR // SOR_Omega : Over-relaxation parameter // MG_Max_Iter : Maximum number of iterations for multigrid // MG_NPre_Smooth : Number of pre-smoothing steps for multigrid // MG_NPos_tSmooth : Number of post-smoothing steps for multigrid // MG_Tolerated_Error : Maximum tolerated error for multigrid // Poi_Coeff : Coefficient in front of density in the Poisson equation (4*Pi*Newton_G*a) // IntScheme : Interpolation scheme for potential // --> currently supported schemes include // INT_CQUAD : conservative quadratic interpolation // INT_QUAD : quadratic interpolation // P5_Gradient : Use 5-points stencil to evaluate the potential gradient // ELBDM_Eta : Particle mass / Planck constant in ELBDM // ELBDM_Lambda : Quartic self-interaction coefficient in ELBDM // Poisson : true --> invoke the Poisson solver // GraAcc : true --> invoke the Gravity solver // GPU_NStream : 
Number of CUDA streams for the asynchronous memory copy // GravityType : Types of gravity --> self-gravity, external gravity, both // TimeNew : Physical time at the current step (for the external gravity solver) // TimeOld : Physical time at the previous step (for the external gravity solver in UNSPLIT_GRAVITY) // ExtPot : Add the external potential // MinEint : Internal energy floor // // Useless parameters in HYDRO : ELBDM_Eta, ELBDM_Lambda // Useless parameters in ELBDM : P5_Gradient //------------------------------------------------------------------------------------------------------- void CUAPI_Asyn_PoissonGravitySolver( const real h_Rho_Array [][RHO_NXT][RHO_NXT][RHO_NXT], const real h_Pot_Array_In [][POT_NXT][POT_NXT][POT_NXT], real h_Pot_Array_Out[][GRA_NXT][GRA_NXT][GRA_NXT], real h_Flu_Array [][GRA_NIN][PS1][PS1][PS1], const double h_Corner_Array[][3], const real h_Pot_Array_USG[][USG_NXT_G][USG_NXT_G][USG_NXT_G], const real h_Flu_Array_USG[][GRA_NIN-1][PS1][PS1][PS1], char h_DE_Array [][PS1][PS1][PS1], const real h_Emag_Array [][PS1][PS1][PS1], const int NPatchGroup, const real dt, const real dh, const int SOR_Min_Iter, const int SOR_Max_Iter, const real SOR_Omega, const int MG_Max_Iter, const int MG_NPre_Smooth, const int MG_NPost_Smooth, const real MG_Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme, const bool P5_Gradient, const real ELBDM_Eta, const real ELBDM_Lambda, const bool Poisson, const bool GraAcc, const int GPU_NStream, const OptGravityType_t GravityType, const double TimeNew, const double TimeOld, const bool ExtPot, const real MinEint ) { // model-independent constants # if ( POT_SCHEME == SOR ) const dim3 Poi_Block_Dim( RHO_NXT/2, RHO_NXT, POT_BLOCK_SIZE_Z ); # elif ( POT_SCHEME == MG ) const dim3 Poi_Block_Dim( POT_BLOCK_SIZE_X, 1, 1 ); # endif const dim3 Gra_Block_Dim( GRA_BLOCK_SIZE ); const int NPatch = NPatchGroup*8; # if ( POT_SCHEME == SOR ) const real Poi_Const = Poi_Coeff*dh*dh; const real SOR_Omega_6 = SOR_Omega/6.0; # endif // model-dependent constants # if ( MODEL == HYDRO ) # elif ( MODEL == ELBDM ) const real ELBDM_EtaDt = ELBDM_Eta*dt; # else # error : ERROR : unsupported MODEL !! # endif // check # if ( MODEL == ELBDM && !defined STORE_POT_GHOST && GRA_GHOST_SIZE != 0 ) # warning : WARNING : GRA_GHOST_SIZE != 0 in ELBDM (without STORE_POT_GHOST) !! 
# endif # ifdef GAMER_DEBUG const int Poi_NThread = Poi_Block_Dim.x * Poi_Block_Dim.y * Poi_Block_Dim.z; // minimum number of threads for spatial interpolation if ( Poisson && Poi_NThread < (POT_NXT-2)*(POT_NXT-2) ) Aux_Error( ERROR_INFO, "Poi_NThread (%d) < (POT_NXT-2)*(POT_NXT-2) (%d) !!\n", Poi_NThread, (POT_NXT-2)*(POT_NXT-2) ); // constraint due to the reduction operation in "CUPOT_Poisson_10to14cube" and "CUPOT_PoissonSolver_MG" # if ( ( POT_SCHEME == SOR && defined USE_PSOLVER_10TO14 ) || POT_SCHEME == MG ) if ( Poisson && Poi_NThread < 64 ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must >= 64) !!\n", "Poi_NThread", Poi_NThread ); # endif // constraint in "CUPOT_PoissonSolver_SOR_16to18cube" # if ( POT_SCHEME == SOR && !defined USE_PSOLVER_10TO14 ) if ( Poisson && Poi_NThread != RHO_NXT*RHO_NXT/2 ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must == %d) !!\n", "Poi_NThread", Poi_NThread, RHO_NXT*RHO_NXT/2 ); # endif if ( GraAcc ) { if ( GravityType == GRAVITY_EXTERNAL || GravityType == GRAVITY_BOTH || ExtPot ) { if ( h_Corner_Array == NULL ) Aux_Error( ERROR_INFO, "h_Corner_Array == NULL !!\n" ); if ( d_Corner_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_Corner_Array_G == NULL !!\n" ); } # ifdef UNSPLIT_GRAVITY if ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) { if ( h_Pot_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Pot_Array_USG == NULL !!\n" ); if ( d_Pot_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Pot_Array_USG_G == NULL !!\n" ); } if ( h_Flu_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Flu_Array_USG == NULL !!\n" ); if ( d_Flu_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Flu_Array_USG_G == NULL !!\n" ); # endif # ifdef DUAL_ENERGY if ( h_DE_Array == NULL ) Aux_Error( ERROR_INFO, "h_DE_Array == NULL !!\n" ); if ( d_DE_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_DE_Array_G == NULL !!\n" ); # endif # ifdef MHD if ( h_Emag_Array == NULL ) Aux_Error( ERROR_INFO, "h_Emag_Array == NULL !!\n" ); if ( d_Emag_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_Emag_Array_G == NULL !!\n" ); # endif } # endif // #ifdef GAMER_DEBUG if ( Poisson && ( IntScheme != INT_CQUAD && IntScheme != INT_QUAD ) ) Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "IntScheme", IntScheme ); int *NPatch_per_Stream = new int [GPU_NStream]; int *Rho_MemSize = new int [GPU_NStream]; int *Pot_MemSize_In = new int [GPU_NStream]; int *Pot_MemSize_Out = new int [GPU_NStream]; int *Flu_MemSize = new int [GPU_NStream]; int *Corner_MemSize = new int [GPU_NStream]; int *UsedPatch = new int [GPU_NStream]; # ifdef UNSPLIT_GRAVITY int *Pot_USG_MemSize = new int [GPU_NStream]; int *Flu_USG_MemSize = new int [GPU_NStream]; # endif # ifdef DUAL_ENERGY int *DE_MemSize = new int [GPU_NStream]; # endif # ifdef MHD int *Emag_MemSize = new int [GPU_NStream]; # endif // set the number of patches in each stream UsedPatch[0] = 0; if ( GPU_NStream == 1 ) NPatch_per_Stream[0] = NPatch; else { for (int s=0; s<GPU_NStream-1; s++) { NPatch_per_Stream[s] = NPatch/GPU_NStream; UsedPatch[s+1] = UsedPatch[s] + NPatch_per_Stream[s]; } NPatch_per_Stream[GPU_NStream-1] = NPatch - UsedPatch[GPU_NStream-1]; } // set the size of data to be transferred into GPU in each stream for (int s=0; s<GPU_NStream; s++) { Rho_MemSize [s] = NPatch_per_Stream[s]*CUBE(RHO_NXT )*sizeof(real); Pot_MemSize_In [s] = NPatch_per_Stream[s]*CUBE(POT_NXT )*sizeof(real); Pot_MemSize_Out[s] = NPatch_per_Stream[s]*CUBE(GRA_NXT )*sizeof(real); Flu_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real)*GRA_NIN; Corner_MemSize 
[s] = NPatch_per_Stream[s]*3 *sizeof(double); # ifdef UNSPLIT_GRAVITY Pot_USG_MemSize[s] = NPatch_per_Stream[s]*CUBE(USG_NXT_G)*sizeof(real); Flu_USG_MemSize[s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real)*(GRA_NIN-1); # endif # ifdef DUAL_ENERGY DE_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(char); # endif # ifdef MHD Emag_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real); # endif } // a. copy data from host to device //========================================================================================= for (int s=0; s<GPU_NStream; s++) { if ( NPatch_per_Stream[s] == 0 ) continue; if ( Poisson ) { CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Rho_Array_P + UsedPatch[s], h_Rho_Array + UsedPatch[s], Rho_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) ); CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Pot_Array_P_In + UsedPatch[s], h_Pot_Array_In + UsedPatch[s], Pot_MemSize_In[s], cudaMemcpyHostToDevice, Stream[s] ) ); } if ( GraAcc ) { if ( ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) && !Poisson ) CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Pot_Array_P_Out + UsedPatch[s], h_Pot_Array_Out + UsedPatch[s], Pot_MemSize_Out[s], cudaMemcpyHostToDevice, Stream[s] ) ); CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Flu_Array_G + UsedPatch[s], h_Flu_Array + UsedPatch[s], Flu_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) ); if ( GravityType == GRAVITY_EXTERNAL || GravityType == GRAVITY_BOTH || ExtPot ) CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Corner_Array_G + UsedPatch[s], h_Corner_Array + UsedPatch[s], Corner_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) ); # ifdef UNSPLIT_GRAVITY if ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Pot_Array_USG_G + UsedPatch[s], h_Pot_Array_USG + UsedPatch[s], Pot_USG_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) ); CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Flu_Array_USG_G + UsedPatch[s], h_Flu_Array_USG + UsedPatch[s], Flu_USG_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) ); # endif # ifdef DUAL_ENERGY CUDA_CHECK_ERROR( cudaMemcpyAsync( d_DE_Array_G + UsedPatch[s], h_DE_Array + UsedPatch[s], DE_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) ); # endif # ifdef MHD CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Emag_Array_G + UsedPatch[s], h_Emag_Array + UsedPatch[s], Emag_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) ); # endif } // if ( GraAcc ) } // for (int s=0; s<GPU_NStream; s++) // b. execute the kernel //========================================================================================= for (int s=0; s<GPU_NStream; s++) { if ( NPatch_per_Stream[s] == 0 ) continue; // b1. Poisson solver if ( Poisson ) { # if ( POT_SCHEME == SOR ) # ifdef USE_PSOLVER_10TO14 CUPOT_PoissonSolver_SOR_10to14cube <<< NPatch_per_Stream[s], Poi_Block_Dim, 0, Stream[s] >>> ( d_Rho_Array_P + UsedPatch[s], d_Pot_Array_P_In + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme ); # else CUPOT_PoissonSolver_SOR_16to18cube <<< NPatch_per_Stream[s], Poi_Block_Dim, 0, Stream[s] >>> ( d_Rho_Array_P + UsedPatch[s], d_Pot_Array_P_In + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme ); # endif // #ifdef USE_PSOLVER_10TO14 ... else ... 
# elif ( POT_SCHEME == MG ) CUPOT_PoissonSolver_MG <<< NPatch_per_Stream[s], Poi_Block_Dim, 0, Stream[s] >>> ( d_Rho_Array_P + UsedPatch[s], d_Pot_Array_P_In + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], dh, MG_Max_Iter, MG_NPre_Smooth, MG_NPost_Smooth, MG_Tolerated_Error, Poi_Coeff, IntScheme ); # else # error : unsupported GPU Poisson solver # endif // POT_SCHEME } // if ( Poisson ) // b2. Gravity solver if ( GraAcc ) { # if ( MODEL == HYDRO ) CUPOT_HydroGravitySolver <<< NPatch_per_Stream[s], Gra_Block_Dim, 0, Stream[s] >>> ( d_Flu_Array_G + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], d_Corner_Array_G + UsedPatch[s], d_Pot_Array_USG_G + UsedPatch[s], d_Flu_Array_USG_G + UsedPatch[s], d_DE_Array_G + UsedPatch[s], d_Emag_Array_G + UsedPatch[s], dt, dh, P5_Gradient, GravityType, GPUExtAcc_Ptr, TimeNew, TimeOld, MinEint ); # elif ( MODEL == ELBDM ) CUPOT_ELBDMGravitySolver <<< NPatch_per_Stream[s], Gra_Block_Dim, 0, Stream[s] >>> ( d_Flu_Array_G + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], d_Corner_Array_G + UsedPatch[s], ELBDM_EtaDt, dh, ELBDM_Lambda, ExtPot, GPUExtPot_Ptr, TimeNew ); # else # error : ERROR : unsupported MODEL !! # endif // MODEL } // if ( GraAcc ) CUDA_CHECK_ERROR( cudaGetLastError() ); } // for (int s=0; s<GPU_NStream; s++) // c. copy data from device to host //========================================================================================= for (int s=0; s<GPU_NStream; s++) { if ( NPatch_per_Stream[s] == 0 ) continue; if ( Poisson ) CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Pot_Array_Out + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s], Pot_MemSize_Out[s], cudaMemcpyDeviceToHost, Stream[s] ) ); if ( GraAcc ) { CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Flu_Array + UsedPatch[s], d_Flu_Array_G + UsedPatch[s], Flu_MemSize[s], cudaMemcpyDeviceToHost, Stream[s] ) ); # ifdef DUAL_ENERGY CUDA_CHECK_ERROR( cudaMemcpyAsync( h_DE_Array + UsedPatch[s], d_DE_Array_G + UsedPatch[s], DE_MemSize[s], cudaMemcpyDeviceToHost, Stream[s] ) ); # endif } } // for (int s=0; s<GPU_NStream; s++) delete [] NPatch_per_Stream; delete [] Rho_MemSize; delete [] Pot_MemSize_In; delete [] Pot_MemSize_Out; delete [] Flu_MemSize; delete [] Corner_MemSize; delete [] UsedPatch; # ifdef UNSPLIT_GRAVITY delete [] Pot_USG_MemSize; delete [] Flu_USG_MemSize; # endif # ifdef DUAL_ENERGY delete [] DE_MemSize; # endif # ifdef MHD delete [] Emag_MemSize; # endif } // FUNCTION : CUAPI_Asyn_PoissonGravitySolver #endif // #if ( defined GPU && defined GRAVITY )
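CUAPI_Asyn_PoissonGravitySolver splits the patches across GPU_NStream streams and, per stream, queues asynchronous host-to-device copies, the Poisson and gravity kernels, and asynchronous device-to-host copies, returning before the GPU has finished. The sketch below reduces that pipeline to its bare shape under made-up names (solve_patch, async_solver, a single float buffer per patch); it is not GAMER's real interface, just the same copy/compute/copy pattern under stated assumptions.

#include <hip/hip_runtime.h>

// Hypothetical per-patch kernel; the real solvers above take many more arguments.
__global__ void solve_patch(float *data, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] += 1.0f;
}

// Bare copy/compute/copy pipeline over NStream streams, in the spirit of the
// function above: it enqueues work and returns without synchronizing.
void async_solver(float *h_data, float *d_data, int NPatch, int patch_size,
                  hipStream_t *Stream, int NStream)
{
    int per_stream = NPatch / NStream;

    for (int s = 0; s < NStream; s++) {
        int first = s * per_stream;
        int count = (s == NStream - 1) ? NPatch - first : per_stream;
        if (count == 0) continue;

        size_t offset = (size_t)first * patch_size;
        size_t bytes  = (size_t)count * patch_size * sizeof(float);

        // a. host -> device, asynchronous in this stream
        hipMemcpyAsync(d_data + offset, h_data + offset, bytes,
                       hipMemcpyHostToDevice, Stream[s]);

        // b. kernel on the same stream, ordered after the copy
        dim3 block(256);
        dim3 grid((count * patch_size + 255) / 256);
        hipLaunchKernelGGL(solve_patch, grid, block, 0, Stream[s],
                           d_data + offset, count * patch_size);

        // c. device -> host, still on the same stream
        hipMemcpyAsync(h_data + offset, d_data + offset, bytes,
                       hipMemcpyDeviceToHost, Stream[s]);
    }
}

int main(void)
{
    const int NPatch = 32, patch_size = 4096, NStream = 4;
    const size_t bytes = (size_t)NPatch * patch_size * sizeof(float);

    float *h_data = NULL, *d_data = NULL;
    hipHostMalloc((void **)&h_data, bytes, hipHostMallocDefault); // pinned, so the async copies can overlap
    hipMalloc((void **)&d_data, bytes);

    hipStream_t Stream[NStream];
    for (int s = 0; s < NStream; s++) hipStreamCreate(&Stream[s]);

    async_solver(h_data, d_data, NPatch, patch_size, Stream, NStream);

    // the caller, not the solver, decides when to wait
    for (int s = 0; s < NStream; s++) hipStreamSynchronize(Stream[s]);

    for (int s = 0; s < NStream; s++) hipStreamDestroy(Stream[s]);
    hipFree(d_data);
    hipHostFree(h_data);
    return 0;
}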
353838d656a5de5a537e23831cbe1c356b5db275.hip
// !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMathReduce.cuh"
#include "THHTensor.hpp"
#include "THHStream.h"
#include "../generic/THCTensorMathReduce.cu"
#include "../THCGenerateByteType.h"
353838d656a5de5a537e23831cbe1c356b5db275.cu
#include "../THCTensorMathReduce.cuh" #include "THCTensor.hpp" #include "THCStream.h" #include "../generic/THCTensorMathReduce.cu" #include "../THCGenerateByteType.h"
317eb72539c5b6124610608ea8e0753dccdddc3d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /**********HEADERS**********/ #include <algorithm> #include <iostream> #include <iomanip> #include <string> #include <limits> #include <stdlib.h> #include <fstream> #include <math.h> #include <time.h> #include "cuda_ptr.cuh" #include "mimo-io.cuh" using namespace std; /**********DEFINING CONSTANTS***********/ #define NX 192 //was 201 #define NY 192 //was 201 #define NT 401 #define NS 640 //number of sensors #define HX 0.001f #define HY 0.001f #define H 0.001f #define DT 3.3333e-07f #define OMEGAC 7.8540e+05f #define TAO 4.0000e-06f #define TT 8.1573e-06f /**********FUNCTION DECLARATION**********/ //Host Functions void Ultrasonic_Tomography(const string&, int, int, float, int, float, float, float, float); void Position_Transducers(host_ptr<int>, host_ptr<int>, int); float norm(host_ptr<float>, int, int); //In-Line Functions inline int grid_size(int, int); template <typename T> __host__ __device__ void minmax(T &a, T &b); //Device Functions __global__ void propagation(kernel_ptr<int> const, kernel_ptr<int> const, kernel_ptr<float> const, kernel_ptr<float>, int, int, int, int, int); __global__ void propagation_at_corners(kernel_ptr<float>, int, int, int); __global__ void difference_signal(kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>, int, int, int); __global__ void backpropagation1(kernel_ptr<float>, kernel_ptr<float> const, int, int, int, int); __global__ void backpropagation2(kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, int, int, int, int); __global__ void laplace(kernel_ptr<float> const, kernel_ptr<float>, int, int, int); __global__ void laplace_corners(kernel_ptr<float> const, kernel_ptr<float>, int, int, int); __global__ void update_differential(kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, int, int, int); __global__ void update_field(kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float> const, float, int); __global__ void weights_differential1( kernel_ptr<float> norm, kernel_ptr<float> const df, kernel_ptr<float> const df_avg, int Ng) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int g = threadIdx.z + blockIdx.z * blockDim.z; if (i < NX && j < NY && g < Ng) { float val = df(i, j, g) - (df_avg(i, j) / Ng); atomicAdd( &norm(i, g), val * val); } } __global__ void weights_differential2( kernel_ptr<float> weights, kernel_ptr<float> total_weight, kernel_ptr<float> const norm, int Ng) { int g = threadIdx.x + blockIdx.x * blockDim.x; if (g < Ng) { float sum = 0.f; for (int i = 0; i < NX; ++i) { sum += norm(i, g); } weights(g) = 1.f / sqrtf(sum); atomicAdd( &total_weight(0), weights(g)); } } __global__ void average_differential( kernel_ptr<float> df_avg, kernel_ptr<float> const df, kernel_ptr<float> const weights, kernel_ptr<float> const total_weight, int Ng) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int g = threadIdx.z + blockIdx.z * blockDim.z; if (i < NX && j < NY && g < Ng) { float weight = weights(g) / total_weight(0); atomicAdd( &df_avg(i, j), df(i, j, g) * weight); } } /***************MAIN PROGRAM***************/ int main(int argc, char **argv) { //Command Line Argument Processing if 
(argc != 10) { cerr << "Usage: " << argv[0] << " <fo_filename> <group size> <parallel groups> <target epsilon> <max iterations> <omega> <alpha> <beta> <gamma>\n\n"; exit(1); } string fo_filename = argv[1]; if (count(fo_filename.begin(), fo_filename.end(), '.') != 1) { cerr << "Error: '" << fo_filename << "' should have only one period.\n" << " It should be in the current directory " << "and have only one filetype extension.\n\n"; exit(1); } int group_size = stoi(argv[2]); int Np = stoi(argv[3]); float target_epsilon = stof(argv[4]); int max_iterations = stoi(argv[5]); float omega = stof(argv[6]); float alpha = stof(argv[7]); float beta = stof(argv[8]); float gamma = stof(argv[9]); if (max_iterations == -1) max_iterations = numeric_limits<int>::max(); cout << setprecision(9); cerr << setprecision(9); Ultrasonic_Tomography(fo_filename, group_size, Np, target_epsilon, max_iterations, omega, alpha, beta, gamma); hipDeviceReset(); } /**********HOST FUNCTION DEFINITIONS**********/ void Ultrasonic_Tomography(const string &fo_filename, int group_size, int Np, float target_epsilon, int max_iterations, float omega, float alpha, float beta, float gamma) { // fo(i, j) = // ground truth value at pos (i, j) of field host_ptr<float> fo(NX, NY); device_ptr<float> dev_fo(NX, NY); // Ng = number of sensor groups that will be launched in parallel int Ng = NS / group_size; // gg_xxx(i, k, g) = // initial signal at pos i in row/column xxx // at time k, from sensor group // e.g g_bottom stores the bottom row, // g_right stores the right column device_ptr<float> dev_g_bottom(NX, NT, Ng); device_ptr<float> dev_g_right(NY, NT, Ng); device_ptr<float> dev_g_top(NX, NT, Ng); device_ptr<float> dev_g_left(NY, NT, Ng); host_ptr<float> g_bottom(NX, NT, Ng); host_ptr<float> g_right(NY, NT, Ng); host_ptr<float> g_top(NX, NT, Ng); host_ptr<float> g_left(NY, NT, Ng); auto idx = fo_filename.find('.'); string basename = fo_filename.substr(0, idx); { ifstream fo_in(fo_filename); if (!fo_in) { cerr << "Error: '" << fo_filename << "' file not found in current directory.\n\n"; return; } string prefix = basename + "-data-"; string suffix = "-" + to_string(group_size) + ".txt"; string gb_name = prefix + "bottom" + suffix; string gr_name = prefix + "right" + suffix; string gt_name = prefix + "top" + suffix; string gl_name = prefix + "left" + suffix; ifstream gb_in(gb_name); ifstream gr_in(gr_name); ifstream gt_in(gt_name); ifstream gl_in(gl_name); if (!gb_in) { cerr << "Error: '" << gb_name << "' file not found in current directory.\n\n"; return; } if (!gr_in) { cerr << "Error: '" << gr_name << "' file not found in current directory.\n\n"; return; } if (!gt_in) { cerr << "Error: '" << gt_name << "' file not found in current directory.\n\n"; return; } if (!gl_in) { cerr << "Error: '" << gl_name << "' file not found in current directory.\n\n"; return; } read(fo_in, fo); copy(dev_fo, fo); read(gb_in, g_bottom); copy(dev_g_bottom, g_bottom); read(gr_in, g_right); copy(dev_g_right, g_right); read(gt_in, g_top); copy(dev_g_top, g_top); read(gl_in, g_left); copy(dev_g_left, g_left); } // Position of the transducers host_ptr<int> ii(NS); host_ptr<int> jj(NS); device_ptr<int> dev_ii(NS); device_ptr<int> dev_jj(NS); Position_Transducers(ii, jj, NS); // copy from host to device copy(dev_ii, ii); copy(dev_jj, jj); // u(i, j, k, g) = // wave propagation at pos (i, j) of field, at time k, from sensor group g device_ptr<float> dev_u(NX, NY, NT, Np); // Kaczmarz method // propagation // rr_xxx(i, k, g) = // difference signal between xxx sensors in u 
and gg_xxx // at time k, from sensor group g device_ptr<float> dev_rr_bottom(NX, NT, Ng); device_ptr<float> dev_rr_right(NX, NT, Ng); device_ptr<float> dev_rr_top(NX, NT, Ng); device_ptr<float> dev_rr_left(NX, NT, Ng); // z(i, j, k, g) = // wave back propagation at pos (i, j) of field, // at time k, from sensor group g device_ptr<float> dev_z(NX, NY, NT+1, Np); // Lu(i, j, k, g) = // result of applying the Laplace operator to u(i, j, k, g) device_ptr<float> dev_Lu(NX, NY, NT, Np); // f(i, j) = // current reconstruction of field at pos (i, j) host_ptr<float> f(NX, NY); device_ptr<float> dev_f(NX, NY); // df(i, j, g) = // discretized differential of f(i, j) from sensor group g device_ptr<float> dev_df(NX, NY, Ng); device_ptr<float> dev_df_avg(NX, NY); device_ptr<float> dev_norm(NX, Ng); device_ptr<float> dev_weights(Ng); device_ptr<float> dev_total_weight(1); // f_minus_fo(i, j) // difference of field and ground truth at pos (i, j) host_ptr<float> f_minus_fo(NX, NY); device_ptr<float> dev_f_minus_fo(NX, NY); // initialize epsilon values float prev_epsilon = 100.f; float curr_epsilon = -std::numeric_limits<float>::infinity(); float file_epsilon = std::numeric_limits<float>::infinity(); /* cerr << "writing convergence to 'sirt_convergence.txt'...\n" */ /* << "writing time to 'sirt_time.txt'...\n\n"; */ ofstream convergence_file("sirt_convergence.txt"); ofstream time_file("sirt_time.txt"); // kernel launch parameters for propagation dim3 threads_prop(32, 1, 4); dim3 grid_prop( grid_size(NX, threads_prop.x), grid_size(NY, threads_prop.y), grid_size(Np, threads_prop.z)); // kernel launch parameters for propagation_at_corners dim3 threads_prop_corners(32, 1); dim3 grid_prop_corners( grid_size(NT, threads_prop_corners.x), grid_size(Np, threads_prop_corners.y)); // kernel launch parameters for difference_signal dim3 threads_diff_signal(NX, 1, 1); dim3 grid_diff_signal( grid_size(NX, threads_diff_signal.x), grid_size(NT, threads_diff_signal.y), grid_size(Np, threads_diff_signal.z)); // kernel launch parameters for backpropagation1 dim3 threads_bp1(64, 2, 1); dim3 grid_bp1( grid_size(NX, threads_bp1.x), grid_size(NY, threads_bp1.y), grid_size(Np, threads_bp1.z)); // kernel launch parameters for backpropagation2 dim3 threads_bp2(32, 1); dim3 grid_bp2( grid_size(NX, threads_bp2.x), grid_size(Np, threads_bp2.y)); // kernel launch parameters for laplace dim3 threads_L(32, 2, 2); dim3 grid_L( grid_size(NX * NY, threads_L.x), grid_size(NT, threads_L.y), grid_size(Np, threads_L.z)); // kernel launch parameters for laplace_corners dim3 threads_L_corners(96, 1, 1); dim3 grid_L_corners( grid_size(NX * NY, threads_L.x), grid_size(NT, threads_L.y), grid_size(Np, threads_L.z)); // kernel launch parameters for update_differential dim3 threads_diff(64, 2, 2); dim3 grid_diff( grid_size(NX * NY, threads_diff.x), grid_size(NT, threads_diff.y), grid_size(Np, threads_diff.z)); // kernel launch parameters for field kernels dim3 threads_field(NX, 1); dim3 grid_field( grid_size(NX, threads_field.x), grid_size(NY, threads_field.y)); dim3 threads_weights1(NX, 1, 1); dim3 grid_weights1( grid_size(NX, threads_weights1.x), grid_size(NY, threads_weights1.y), grid_size(Ng, threads_weights1.z)); dim3 threads_weights2(1); dim3 grid_weights2( grid_size(Ng, threads_weights2.x)); dim3 threads_avg_diff(NX, 1, 1); dim3 grid_avg_diff( grid_size(NX, threads_avg_diff.x), grid_size(NY, threads_avg_diff.y), grid_size(Ng, threads_avg_diff.z)); cerr << "group size: " << group_size << "\n" << "target epsilon: " << target_epsilon << "\n" << 
"omega: " << omega << "\n" << "alpha: " << alpha << "\n" << "beta: " << beta << "\n" << "gamma: " << gamma << "\n\n"; int w_iter = 6; int w_eps = 12; int w_diff = 15; cout << setw(w_iter) << "iter" << " " << setw(w_eps) << "epsilon" << " " << setw(w_diff) << "difference" << " \n" << string(w_iter, '-') << " " << string(w_eps, '-') << " " << string(w_diff, '-') << " \n"; hipDeviceSynchronize(); int ti = clock(); bool diverged = false; bool reached = false; int iter = 0; while (!reached && !diverged && iter < max_iterations) { ++iter; dev_df.set(0.f); dev_df_avg.set(0.f); dev_total_weight.set(0.f); dev_norm.set(0.f); int total_steps = ceil((float)Ng / Np); for (int step = 0; step < total_steps; ++step) { dev_u.set(0.f); dev_z.set(0.f); dev_Lu.set(0.f); // propagate wave over field, store in u for (int k = 1; k < NT - 1; ++k) { hipLaunchKernelGGL(( propagation), dim3(grid_prop), dim3(threads_prop), 0, 0, dev_ii, dev_jj, dev_f, dev_u, k, group_size, step, Np, Ng); } hipLaunchKernelGGL(( propagation_at_corners), dim3(grid_prop_corners), dim3(threads_prop_corners), 0, 0, dev_u, step, Np, Ng); // store difference signal of u at sensor positions and initial signal at g in rr hipLaunchKernelGGL(( difference_signal), dim3(grid_diff_signal), dim3(threads_diff_signal), 0, 0, dev_u, dev_g_bottom, dev_g_right, dev_g_top, dev_g_left, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, step, Np, Ng); // do back propagation of wave over field, store in z for (int k = NT - 2; k > 0; k--) { hipLaunchKernelGGL(( backpropagation1), dim3(grid_bp1), dim3(threads_bp1), 0, 0, dev_z, dev_f, k, step, Np, Ng); hipLaunchKernelGGL(( backpropagation2), dim3(grid_bp2), dim3(threads_bp2), 0, 0, dev_z, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, k, step, Np, Ng); } // apply Laplace operator to u, store in Lu hipLaunchKernelGGL(( laplace), dim3(grid_L), dim3(threads_L), 0, 0, dev_u, dev_Lu, step, Np, Ng); hipLaunchKernelGGL(( laplace_corners), dim3(grid_L_corners), dim3(threads_L_corners), 0, 0, dev_u, dev_Lu, step, Np, Ng); // update differential of f, store in df hipLaunchKernelGGL(( update_differential), dim3(grid_diff), dim3(threads_diff), 0, 0, dev_df, dev_df_avg, dev_z, dev_Lu, dev_f, step, Np, Ng); } hipLaunchKernelGGL(( weights_differential1), dim3(grid_weights1), dim3(threads_weights1), 0, 0, dev_norm, dev_df, dev_df_avg, Ng); dev_df_avg.set(0.f); hipLaunchKernelGGL(( weights_differential2), dim3(grid_weights2), dim3(threads_weights2), 0, 0, dev_weights, dev_total_weight, dev_norm, Ng); hipLaunchKernelGGL(( average_differential), dim3(grid_avg_diff), dim3(threads_avg_diff), 0, 0, dev_df_avg, dev_df, dev_weights, dev_total_weight, Ng); float scale = omega * (alpha + (beta / pow(iter, gamma))); // update f and f_minus_fo hipLaunchKernelGGL(( update_field), dim3(grid_field), dim3(threads_field), 0, 0, dev_f, dev_df_avg, dev_f_minus_fo, dev_fo, scale, Ng); // error calculation // copy from device to host copy(f_minus_fo, dev_f_minus_fo); curr_epsilon = norm(f_minus_fo, NX, NY) / norm(fo, NX, NY) * 100.f; float current_t = (float)(clock()-ti) / CLOCKS_PER_SEC; if (abs(file_epsilon - curr_epsilon) > 0.2f) { convergence_file << curr_epsilon << " "; time_file << current_t << " "; file_epsilon = curr_epsilon; } cout << setw(w_iter) << iter << " " << setw(w_eps) << curr_epsilon << " " << setw(w_diff) << prev_epsilon - curr_epsilon << " " << scale << " \n"; // check ending conditions reached = curr_epsilon <= target_epsilon; diverged = curr_epsilon > prev_epsilon || std::isnan(curr_epsilon); // update prev_epsilon 
prev_epsilon = curr_epsilon; } if (reached) { cerr << "reached target epsilon: " << target_epsilon << ", at iter: " << iter << ", epsilon: " << curr_epsilon << "\n\n"; } else if (diverged) { cerr << "diverged at iter: " << iter << ", epsilon: " << curr_epsilon << "\n\n"; } else { cerr << "stopped at iter: " << iter << ", epsilon: " << curr_epsilon << "\n\n"; } hipDeviceSynchronize(); int tf = clock(); float elapsed_time = (float)(tf - ti) / CLOCKS_PER_SEC; cout << endl; cerr << "total time (s): " << elapsed_time << "\n" << "per iteration (s): " << elapsed_time / iter << "\n"; // copy from device to host copy(f, dev_f); string f_name = "sirt-norm-" + to_string(group_size) + "-" + basename + ".txt"; /* cerr << "writing to '" << f_name << "'...\n\n"; */ ofstream f_out(f_name); write(f_out, f); size_t free, total; hipMemGetInfo(&free, &total); cerr << "used mem: " << float(total - free) / (1024 * 1024) << " MB\n" << "free mem: " << float(free) / (1024 * 1024) << " MB\n" << "total mem: " << float(total) / (1024 * 1024) << " MB\n\n"; } float norm(host_ptr<float> A, int nx, int ny) { float sum = 0; for (int j = 0; j < ny; ++j) for (int i = 0; i < nx; ++i) sum += A(i, j) * A(i, j); return sqrtf(sum); } void Position_Transducers(host_ptr<int> ii, host_ptr<int> jj, int num) { //returns the (x,y) coordinates of the number of total transducers int p = 0; for(p = 0; p < 160; p++) { ii(p) = 21 + (p + 1); jj(p) = 181; } for(p = 160; p < 320; p++) { ii(p) = 181; jj(p) = 181 - ((p + 1) - 160); } for(p = 320; p < 480; p++) { ii(p) = 181 - ((p + 1) - 320); jj(p) = 21; } for(p = 480; p < num; p++) { ii(p) = 21; jj(p) = 21 + ((p + 1) - 480); } } /**********DEVICE FUNCTION DEFINITIONS***********/ __global__ void propagation( kernel_ptr<int> const ii, kernel_ptr<int> const jj, kernel_ptr<float> const f, kernel_ptr<float> u, int k, int group_size, int step, int Np, int Ng) { // Map from threadIdx / BlockIdx to pixel position int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int p = threadIdx.z + blockIdx.z * blockDim.z; int g = p + Np * step; if(i < NX && j < NY && p < Np && g < Ng) { float v = 1500.f * sqrtf(1.f + f(i, j)); float r = v * DT / HX; float s = 2.f - 4.f * r * r; float val; // will hold new u at (i, j, k + 1) // not at boundary if (i != 0 && i != NX - 1 && j != 0 && j != NY - 1) { val = r * r * (u(i+1, j, k, p) + u(i-1, j, k, p) + u(i, j-1, k, p) + u(i, j+1, k, p)) + s * u(i, j, k, p) - u(i, j, k-1, p); int sensor_idx = g * group_size; int jp1 = jj(sensor_idx); int jp2 = jj(sensor_idx + group_size - 1); int ip1 = ii(sensor_idx); int ip2 = ii(sensor_idx + group_size - 1); minmax(jp1, jp2); minmax(ip1, ip2); // at sensor, k <= 24 if (j + 1 >= jp1 && j + 1 <= jp2 && i + 1 >= ip1 && i + 1 <= ip2 && k + 1 <= 24) { float t = k * DT - TT; // add wave value val += v * v * DT * DT * cosf(OMEGAC * t) * expf(-(t * t) / (2.f * TAO * TAO)); } } // at boundary else { // boundary booleans bool top = (j == 0); bool bottom = (j == NY - 1); bool left = (i == 0); bool right = (i == NX - 1); // index variables for different boundary cases int ja = top ? (j + 1) : bottom ? (j - 1) : j; int jb = top ? (j + 2) : bottom ? (j - 2) : j; int ia = left ? (i + 1) : right ? (i - 1) : i; int ib = left ? (i + 2) : right ? 
(i - 2) : i; val = (2.f - 2.f * r - r * r) * u(i, j, k, p) + 2.f * r * (1.f + r) * u(ia, ja, k, p) - r * r * u(ib, jb, k, p) + (2.f * r - 1.f) * u(i, j, k-1, p) - 2.f * r * u(ia, ja, k-1, p); } u(i, j, k+1, p) = val; } } __global__ void propagation_at_corners( kernel_ptr<float> u, int step, int Np, int Ng) { int k = threadIdx.x + blockIdx.x * blockDim.x; int p = threadIdx.y + blockIdx.y * blockDim.y; int g = p + Np * step; if (k < NT && g < Ng && p < Np) { u(0, 0, k, p) = 1.f / 2.f * (u(0, 1, k, p) + u(1, 0, k, p)); u(NX-1, 0, k, p) = 1.f / 2.f * (u(NX-2, 0, k, p) + u(NX-1, 1, k, p)); u(0, NY-1, k, p) = 1.f / 2.f * (u(0, NY-2, k, p) + u(1, NY-1, k, p)); u(NX-1, NY-1, k, p) = 1.f / 2.f * (u(NX-2, NY-1, k, p) + u(NX-1, NY-2, k, p)); } } __global__ void difference_signal( kernel_ptr<float> const u, kernel_ptr<float> const g_bottom, kernel_ptr<float> const g_right, kernel_ptr<float> const g_top, kernel_ptr<float> const g_left, kernel_ptr<float> rr_bottom, kernel_ptr<float> rr_right, kernel_ptr<float> rr_top, kernel_ptr<float> rr_left, int step, int Np, int Ng) { int i = threadIdx.x + blockIdx.x * blockDim.x; int k = threadIdx.y + blockIdx.y * blockDim.y; int p = threadIdx.z + blockIdx.z * blockDim.z; int g = p + Np * step; if (i > 20 && i < 180 && k > 1 && k < NT && g < Ng && p < Np) { // store difference at time k of original signal // and current signal at bottom sensor row rr_bottom(i, k, g) = g_bottom(i, k, g) - u(i, 180, k, p); // store difference at time k of original signal // and current signal at top sensor row rr_top(i, k, g) = g_top(i, k, g) - u(i, 20, k, p); // store difference at time k of original signal // and current signal at right sensor column rr_right(i, k, g) = g_right(i, k, g) - u(180, i, k, p); // store difference at time k of original signal // and current signal at left sensor column rr_left(i, k, g) = g_left(i, k, g) - u(20, i, k, p); } } __global__ void backpropagation1( kernel_ptr<float> z, kernel_ptr<float> const f, int k, int step, int Np, int Ng) { // Map from threadIdx / BlockIdx to pixel position int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int p = threadIdx.z + blockIdx.z * blockDim.z; int g = p + Np * step; if(i >= 1 && i < (NX - 1) && j >= 1 && j < (NY - 1) && g < Ng && p < Np) { z(i, j, k, p) = 1500.f * 1500.f * (DT * DT) * ((1.f + f(i, j-1)) * z(i, j-1, k+1, p) + (1.f + f(i, j+1)) * z(i, j+1, k+1, p) + (1.f + f(i-1, j)) * z(i-1, j, k+1, p) + (1.f + f(i+1, j)) * z(i+1, j, k+1, p) - 4.f * (1.f + f(i, j)) * z(i, j, k+1, p)) / (H * H) + 2.f * z(i, j, k+1, p) - z(i, j, k+2, p); } } __global__ void backpropagation2( kernel_ptr<float> z, kernel_ptr<float> const rr_bottom, kernel_ptr<float> const rr_right, kernel_ptr<float> const rr_top, kernel_ptr<float> const rr_left, int k, int step, int Np, int Ng) { int i = threadIdx.x + blockIdx.x * blockDim.x; int p = threadIdx.y + blockIdx.y * blockDim.y; int g = p + Np * step; if (g < Ng && p < Np) { if(i >= 21 && i < 180) { z(i, 180, k, p) = z(i, 179, k, p) + rr_bottom(i, k, g) * H * 1000.f; z(i, 20, k, p) = z(i, 21, k, p) + rr_top(i, k, g) * H * 1000.f; z(180, i, k, p) = z(179, i, k, p) + rr_right(i, k, g) * H * 1000.f; z(20, i, k, p) = z(21, i, k, p) + rr_left(i, k, g) * H * 1000.f; } if (i >= 1 && i < (NX - 1)) { z(i, 0, k, p) = z(i, 1, k, p); z(i, NY-1, k, p) = z(i, NY-2, k, p); z(0, i, k, p) = z(1, i, k, p); z(NX-1, i, k, p) = z(NX-2, i, k, p); } else if (i == 0) { z(0, 0, k, p) = (z(1, 0, k, p) + z(0, 1, k, p)) / 2.f; z(NX-1, 0, k, p) = (z(NX-2, 0, k, p) + z(NX-1, 1, 
k, p)) / 2.f; z(0, NY-1, k, p) = (z(1, NY-1, k, p) + z(0, NY-2, k, p)) / 2.f; z(NX-1, NY-1, k, p) = (z(NX-2, NY-1, k, p) + z(NX-1, NY-2, k, p)) / 2.f; } } } __global__ void laplace( kernel_ptr<float> const u, kernel_ptr<float> Lu, int step, int Np, int Ng) { // Map from threadIdx / BlockIdx to pixel position int tx = threadIdx.x + blockIdx.x * blockDim.x; int k = threadIdx.y + blockIdx.y * blockDim.y; int p = threadIdx.z + blockIdx.z * blockDim.z; int g = p + Np * step; if (tx < (NX * NY) && (k + 1) < NT && g < Ng && p < Np) { int i = tx % NX; int j = tx / NX; int ja = (j > 0) ? (j - 1) : j; int jb = (j < NY - 1) ? (j + 1) : j; int ia = (i > 0) ? (i - 1) : i; int ib = (i < NX - 1) ? (i + 1) : i; Lu(i, j, k+1, p) = (u(i, ja, k+1, p) + u(i, jb, k+1, p) + u(ia, j, k+1, p) + u(ib, j, k+1, p) - 4.f * u(i, j, k+1, p)) / (H * H); } } __global__ void laplace_corners( kernel_ptr<float> const u, kernel_ptr<float> Lu, int step, int Np, int Ng) { int k = threadIdx.x + blockIdx.x * blockDim.x; int p = threadIdx.y + blockIdx.y * blockDim.y; int g = p + Np * step; if ((k + 1) < NT && g < Ng && p < Np) { Lu(0, 0, k+1, p) = (Lu(1, 0, k+1, p) + Lu(0, 1, k+1, p)) / 2.f; Lu(NX-1, 0, k+1, p) = (Lu(NX-2, 0, k+1, p) + Lu(NX-1, 1, k+1, p)) / 2.f; Lu(0, NY-1, k+1, p) = (Lu(1, NY-1, k+1, p) + Lu(0, NY-2, k+1, p)) / 2.f; Lu(NX-1, NY-1, k+1, p) = (Lu(NX-2, NY-1, k+1, p) + Lu(NX-1, NY-2, k+1, p)) / 2.f; } } __global__ void update_differential( kernel_ptr<float> df, kernel_ptr<float> df_avg, kernel_ptr<float> const z, kernel_ptr<float> const Lu, kernel_ptr<float> const f, int step, int Np, int Ng) { // Map from threadIdx / BlockIdx to pixel position int tx = threadIdx.x + blockIdx.x * blockDim.x; int k = threadIdx.y + blockIdx.y * blockDim.y; int p = threadIdx.z + blockIdx.z * blockDim.z; int g = p + Np * step; if (tx < (NX * NY) && (k + 1) < NT && g < Ng && p < Np) { int i = tx % NX; int j = tx / NX; float val = z(i, j, k+1, p) * Lu(i, j, k+1, p) / (1.f + f(i, j)); atomicAdd(&df(i, j, g), val); atomicAdd(&df_avg(i, j), val); } } __global__ void update_field( kernel_ptr<float> f, kernel_ptr<float> const df_avg, kernel_ptr<float> f_minus_fo, kernel_ptr<float> const fo, float scale, int Ng) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < NX && j < NY) { bool in_sensor_field = (i >= 21) && (i < 180) && (j >= 21) && (j < 180); if (in_sensor_field) f(i, j) += scale * df_avg(i, j); f_minus_fo(i, j) = f(i, j) - fo(i, j); } } /**********INLINE FUNCTION DEFINITIONS**********/ inline int grid_size(int n, int threads) { return ceil(float(n) / threads); } // POST-CONDITION: a <= b template <typename T> __host__ __device__ void minmax(T &a, T &b) { if (a > b) { int t = a; a = b; b = t; } }
317eb72539c5b6124610608ea8e0753dccdddc3d.cu
/**********HEADERS**********/ #include <algorithm> #include <iostream> #include <iomanip> #include <string> #include <limits> #include <stdlib.h> #include <fstream> #include <math.h> #include <time.h> #include "cuda_ptr.cuh" #include "mimo-io.cuh" using namespace std; /**********DEFINING CONSTANTS***********/ #define NX 192 //was 201 #define NY 192 //was 201 #define NT 401 #define NS 640 //number of sensors #define HX 0.001f #define HY 0.001f #define H 0.001f #define DT 3.3333e-07f #define OMEGAC 7.8540e+05f #define TAO 4.0000e-06f #define TT 8.1573e-06f /**********FUNCTION DECLARATION**********/ //Host Functions void Ultrasonic_Tomography(const string&, int, int, float, int, float, float, float, float); void Position_Transducers(host_ptr<int>, host_ptr<int>, int); float norm(host_ptr<float>, int, int); //In-Line Functions inline int grid_size(int, int); template <typename T> __host__ __device__ void minmax(T &a, T &b); //Device Functions __global__ void propagation(kernel_ptr<int> const, kernel_ptr<int> const, kernel_ptr<float> const, kernel_ptr<float>, int, int, int, int, int); __global__ void propagation_at_corners(kernel_ptr<float>, int, int, int); __global__ void difference_signal(kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>, int, int, int); __global__ void backpropagation1(kernel_ptr<float>, kernel_ptr<float> const, int, int, int, int); __global__ void backpropagation2(kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, int, int, int, int); __global__ void laplace(kernel_ptr<float> const, kernel_ptr<float>, int, int, int); __global__ void laplace_corners(kernel_ptr<float> const, kernel_ptr<float>, int, int, int); __global__ void update_differential(kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, int, int, int); __global__ void update_field(kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float> const, float, int); __global__ void weights_differential1( kernel_ptr<float> norm, kernel_ptr<float> const df, kernel_ptr<float> const df_avg, int Ng) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int g = threadIdx.z + blockIdx.z * blockDim.z; if (i < NX && j < NY && g < Ng) { float val = df(i, j, g) - (df_avg(i, j) / Ng); atomicAdd( &norm(i, g), val * val); } } __global__ void weights_differential2( kernel_ptr<float> weights, kernel_ptr<float> total_weight, kernel_ptr<float> const norm, int Ng) { int g = threadIdx.x + blockIdx.x * blockDim.x; if (g < Ng) { float sum = 0.f; for (int i = 0; i < NX; ++i) { sum += norm(i, g); } weights(g) = 1.f / sqrtf(sum); atomicAdd( &total_weight(0), weights(g)); } } __global__ void average_differential( kernel_ptr<float> df_avg, kernel_ptr<float> const df, kernel_ptr<float> const weights, kernel_ptr<float> const total_weight, int Ng) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int g = threadIdx.z + blockIdx.z * blockDim.z; if (i < NX && j < NY && g < Ng) { float weight = weights(g) / total_weight(0); atomicAdd( &df_avg(i, j), df(i, j, g) * weight); } } /***************MAIN PROGRAM***************/ int main(int argc, char **argv) { //Command Line Argument Processing if (argc != 10) { cerr << "Usage: " << argv[0] << " <fo_filename> <group size> <parallel 
groups> <target epsilon> <max iterations> <omega> <alpha> <beta> <gamma>\n\n"; exit(1); } string fo_filename = argv[1]; if (count(fo_filename.begin(), fo_filename.end(), '.') != 1) { cerr << "Error: '" << fo_filename << "' should have only one period.\n" << " It should be in the current directory " << "and have only one filetype extension.\n\n"; exit(1); } int group_size = stoi(argv[2]); int Np = stoi(argv[3]); float target_epsilon = stof(argv[4]); int max_iterations = stoi(argv[5]); float omega = stof(argv[6]); float alpha = stof(argv[7]); float beta = stof(argv[8]); float gamma = stof(argv[9]); if (max_iterations == -1) max_iterations = numeric_limits<int>::max(); cout << setprecision(9); cerr << setprecision(9); Ultrasonic_Tomography(fo_filename, group_size, Np, target_epsilon, max_iterations, omega, alpha, beta, gamma); cudaDeviceReset(); } /**********HOST FUNCTION DEFINITIONS**********/ void Ultrasonic_Tomography(const string &fo_filename, int group_size, int Np, float target_epsilon, int max_iterations, float omega, float alpha, float beta, float gamma) { // fo(i, j) = // ground truth value at pos (i, j) of field host_ptr<float> fo(NX, NY); device_ptr<float> dev_fo(NX, NY); // Ng = number of sensor groups that will be launched in parallel int Ng = NS / group_size; // gg_xxx(i, k, g) = // initial signal at pos i in row/column xxx // at time k, from sensor group // e.g g_bottom stores the bottom row, // g_right stores the right column device_ptr<float> dev_g_bottom(NX, NT, Ng); device_ptr<float> dev_g_right(NY, NT, Ng); device_ptr<float> dev_g_top(NX, NT, Ng); device_ptr<float> dev_g_left(NY, NT, Ng); host_ptr<float> g_bottom(NX, NT, Ng); host_ptr<float> g_right(NY, NT, Ng); host_ptr<float> g_top(NX, NT, Ng); host_ptr<float> g_left(NY, NT, Ng); auto idx = fo_filename.find('.'); string basename = fo_filename.substr(0, idx); { ifstream fo_in(fo_filename); if (!fo_in) { cerr << "Error: '" << fo_filename << "' file not found in current directory.\n\n"; return; } string prefix = basename + "-data-"; string suffix = "-" + to_string(group_size) + ".txt"; string gb_name = prefix + "bottom" + suffix; string gr_name = prefix + "right" + suffix; string gt_name = prefix + "top" + suffix; string gl_name = prefix + "left" + suffix; ifstream gb_in(gb_name); ifstream gr_in(gr_name); ifstream gt_in(gt_name); ifstream gl_in(gl_name); if (!gb_in) { cerr << "Error: '" << gb_name << "' file not found in current directory.\n\n"; return; } if (!gr_in) { cerr << "Error: '" << gr_name << "' file not found in current directory.\n\n"; return; } if (!gt_in) { cerr << "Error: '" << gt_name << "' file not found in current directory.\n\n"; return; } if (!gl_in) { cerr << "Error: '" << gl_name << "' file not found in current directory.\n\n"; return; } read(fo_in, fo); copy(dev_fo, fo); read(gb_in, g_bottom); copy(dev_g_bottom, g_bottom); read(gr_in, g_right); copy(dev_g_right, g_right); read(gt_in, g_top); copy(dev_g_top, g_top); read(gl_in, g_left); copy(dev_g_left, g_left); } // Position of the transducers host_ptr<int> ii(NS); host_ptr<int> jj(NS); device_ptr<int> dev_ii(NS); device_ptr<int> dev_jj(NS); Position_Transducers(ii, jj, NS); // copy from host to device copy(dev_ii, ii); copy(dev_jj, jj); // u(i, j, k, g) = // wave propagation at pos (i, j) of field, at time k, from sensor group g device_ptr<float> dev_u(NX, NY, NT, Np); // Kaczmarz method // propagation // rr_xxx(i, k, g) = // difference signal between xxx sensors in u and gg_xxx // at time k, from sensor group g device_ptr<float> dev_rr_bottom(NX, NT, 
Ng); device_ptr<float> dev_rr_right(NX, NT, Ng); device_ptr<float> dev_rr_top(NX, NT, Ng); device_ptr<float> dev_rr_left(NX, NT, Ng); // z(i, j, k, g) = // wave back propagation at pos (i, j) of field, // at time k, from sensor group g device_ptr<float> dev_z(NX, NY, NT+1, Np); // Lu(i, j, k, g) = // result of applying the Laplace operator to u(i, j, k, g) device_ptr<float> dev_Lu(NX, NY, NT, Np); // f(i, j) = // current reconstruction of field at pos (i, j) host_ptr<float> f(NX, NY); device_ptr<float> dev_f(NX, NY); // df(i, j, g) = // discretized differential of f(i, j) from sensor group g device_ptr<float> dev_df(NX, NY, Ng); device_ptr<float> dev_df_avg(NX, NY); device_ptr<float> dev_norm(NX, Ng); device_ptr<float> dev_weights(Ng); device_ptr<float> dev_total_weight(1); // f_minus_fo(i, j) // difference of field and ground truth at pos (i, j) host_ptr<float> f_minus_fo(NX, NY); device_ptr<float> dev_f_minus_fo(NX, NY); // initialize epsilon values float prev_epsilon = 100.f; float curr_epsilon = -std::numeric_limits<float>::infinity(); float file_epsilon = std::numeric_limits<float>::infinity(); /* cerr << "writing convergence to 'sirt_convergence.txt'...\n" */ /* << "writing time to 'sirt_time.txt'...\n\n"; */ ofstream convergence_file("sirt_convergence.txt"); ofstream time_file("sirt_time.txt"); // kernel launch parameters for propagation dim3 threads_prop(32, 1, 4); dim3 grid_prop( grid_size(NX, threads_prop.x), grid_size(NY, threads_prop.y), grid_size(Np, threads_prop.z)); // kernel launch parameters for propagation_at_corners dim3 threads_prop_corners(32, 1); dim3 grid_prop_corners( grid_size(NT, threads_prop_corners.x), grid_size(Np, threads_prop_corners.y)); // kernel launch parameters for difference_signal dim3 threads_diff_signal(NX, 1, 1); dim3 grid_diff_signal( grid_size(NX, threads_diff_signal.x), grid_size(NT, threads_diff_signal.y), grid_size(Np, threads_diff_signal.z)); // kernel launch parameters for backpropagation1 dim3 threads_bp1(64, 2, 1); dim3 grid_bp1( grid_size(NX, threads_bp1.x), grid_size(NY, threads_bp1.y), grid_size(Np, threads_bp1.z)); // kernel launch parameters for backpropagation2 dim3 threads_bp2(32, 1); dim3 grid_bp2( grid_size(NX, threads_bp2.x), grid_size(Np, threads_bp2.y)); // kernel launch parameters for laplace dim3 threads_L(32, 2, 2); dim3 grid_L( grid_size(NX * NY, threads_L.x), grid_size(NT, threads_L.y), grid_size(Np, threads_L.z)); // kernel launch parameters for laplace_corners dim3 threads_L_corners(96, 1, 1); dim3 grid_L_corners( grid_size(NX * NY, threads_L.x), grid_size(NT, threads_L.y), grid_size(Np, threads_L.z)); // kernel launch parameters for update_differential dim3 threads_diff(64, 2, 2); dim3 grid_diff( grid_size(NX * NY, threads_diff.x), grid_size(NT, threads_diff.y), grid_size(Np, threads_diff.z)); // kernel launch parameters for field kernels dim3 threads_field(NX, 1); dim3 grid_field( grid_size(NX, threads_field.x), grid_size(NY, threads_field.y)); dim3 threads_weights1(NX, 1, 1); dim3 grid_weights1( grid_size(NX, threads_weights1.x), grid_size(NY, threads_weights1.y), grid_size(Ng, threads_weights1.z)); dim3 threads_weights2(1); dim3 grid_weights2( grid_size(Ng, threads_weights2.x)); dim3 threads_avg_diff(NX, 1, 1); dim3 grid_avg_diff( grid_size(NX, threads_avg_diff.x), grid_size(NY, threads_avg_diff.y), grid_size(Ng, threads_avg_diff.z)); cerr << "group size: " << group_size << "\n" << "target epsilon: " << target_epsilon << "\n" << "omega: " << omega << "\n" << "alpha: " << alpha << "\n" << "beta: " << beta << "\n" << 
"gamma: " << gamma << "\n\n"; int w_iter = 6; int w_eps = 12; int w_diff = 15; cout << setw(w_iter) << "iter" << " " << setw(w_eps) << "epsilon" << " " << setw(w_diff) << "difference" << " \n" << string(w_iter, '-') << " " << string(w_eps, '-') << " " << string(w_diff, '-') << " \n"; cudaDeviceSynchronize(); int ti = clock(); bool diverged = false; bool reached = false; int iter = 0; while (!reached && !diverged && iter < max_iterations) { ++iter; dev_df.set(0.f); dev_df_avg.set(0.f); dev_total_weight.set(0.f); dev_norm.set(0.f); int total_steps = ceil((float)Ng / Np); for (int step = 0; step < total_steps; ++step) { dev_u.set(0.f); dev_z.set(0.f); dev_Lu.set(0.f); // propagate wave over field, store in u for (int k = 1; k < NT - 1; ++k) { propagation<<<grid_prop, threads_prop>>>(dev_ii, dev_jj, dev_f, dev_u, k, group_size, step, Np, Ng); } propagation_at_corners<<<grid_prop_corners, threads_prop_corners>>>(dev_u, step, Np, Ng); // store difference signal of u at sensor positions and initial signal at g in rr difference_signal<<<grid_diff_signal, threads_diff_signal>>>(dev_u, dev_g_bottom, dev_g_right, dev_g_top, dev_g_left, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, step, Np, Ng); // do back propagation of wave over field, store in z for (int k = NT - 2; k > 0; k--) { backpropagation1<<<grid_bp1, threads_bp1>>>(dev_z, dev_f, k, step, Np, Ng); backpropagation2<<<grid_bp2, threads_bp2>>>(dev_z, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, k, step, Np, Ng); } // apply Laplace operator to u, store in Lu laplace<<<grid_L, threads_L>>>(dev_u, dev_Lu, step, Np, Ng); laplace_corners<<<grid_L_corners, threads_L_corners>>>(dev_u, dev_Lu, step, Np, Ng); // update differential of f, store in df update_differential<<<grid_diff, threads_diff>>>(dev_df, dev_df_avg, dev_z, dev_Lu, dev_f, step, Np, Ng); } weights_differential1<<<grid_weights1, threads_weights1>>>(dev_norm, dev_df, dev_df_avg, Ng); dev_df_avg.set(0.f); weights_differential2<<<grid_weights2, threads_weights2>>>(dev_weights, dev_total_weight, dev_norm, Ng); average_differential<<<grid_avg_diff, threads_avg_diff>>>(dev_df_avg, dev_df, dev_weights, dev_total_weight, Ng); float scale = omega * (alpha + (beta / pow(iter, gamma))); // update f and f_minus_fo update_field<<<grid_field, threads_field>>>(dev_f, dev_df_avg, dev_f_minus_fo, dev_fo, scale, Ng); // error calculation // copy from device to host copy(f_minus_fo, dev_f_minus_fo); curr_epsilon = norm(f_minus_fo, NX, NY) / norm(fo, NX, NY) * 100.f; float current_t = (float)(clock()-ti) / CLOCKS_PER_SEC; if (abs(file_epsilon - curr_epsilon) > 0.2f) { convergence_file << curr_epsilon << " "; time_file << current_t << " "; file_epsilon = curr_epsilon; } cout << setw(w_iter) << iter << " " << setw(w_eps) << curr_epsilon << " " << setw(w_diff) << prev_epsilon - curr_epsilon << " " << scale << " \n"; // check ending conditions reached = curr_epsilon <= target_epsilon; diverged = curr_epsilon > prev_epsilon || std::isnan(curr_epsilon); // update prev_epsilon prev_epsilon = curr_epsilon; } if (reached) { cerr << "reached target epsilon: " << target_epsilon << ", at iter: " << iter << ", epsilon: " << curr_epsilon << "\n\n"; } else if (diverged) { cerr << "diverged at iter: " << iter << ", epsilon: " << curr_epsilon << "\n\n"; } else { cerr << "stopped at iter: " << iter << ", epsilon: " << curr_epsilon << "\n\n"; } cudaDeviceSynchronize(); int tf = clock(); float elapsed_time = (float)(tf - ti) / CLOCKS_PER_SEC; cout << endl; cerr << "total time (s): " << elapsed_time << "\n" << 
"per iteration (s): " << elapsed_time / iter << "\n"; // copy from device to host copy(f, dev_f); string f_name = "sirt-norm-" + to_string(group_size) + "-" + basename + ".txt"; /* cerr << "writing to '" << f_name << "'...\n\n"; */ ofstream f_out(f_name); write(f_out, f); size_t free, total; cudaMemGetInfo(&free, &total); cerr << "used mem: " << float(total - free) / (1024 * 1024) << " MB\n" << "free mem: " << float(free) / (1024 * 1024) << " MB\n" << "total mem: " << float(total) / (1024 * 1024) << " MB\n\n"; } float norm(host_ptr<float> A, int nx, int ny) { float sum = 0; for (int j = 0; j < ny; ++j) for (int i = 0; i < nx; ++i) sum += A(i, j) * A(i, j); return sqrtf(sum); } void Position_Transducers(host_ptr<int> ii, host_ptr<int> jj, int num) { //returns the (x,y) coordinates of the number of total transducers int p = 0; for(p = 0; p < 160; p++) { ii(p) = 21 + (p + 1); jj(p) = 181; } for(p = 160; p < 320; p++) { ii(p) = 181; jj(p) = 181 - ((p + 1) - 160); } for(p = 320; p < 480; p++) { ii(p) = 181 - ((p + 1) - 320); jj(p) = 21; } for(p = 480; p < num; p++) { ii(p) = 21; jj(p) = 21 + ((p + 1) - 480); } } /**********DEVICE FUNCTION DEFINITIONS***********/ __global__ void propagation( kernel_ptr<int> const ii, kernel_ptr<int> const jj, kernel_ptr<float> const f, kernel_ptr<float> u, int k, int group_size, int step, int Np, int Ng) { // Map from threadIdx / BlockIdx to pixel position int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int p = threadIdx.z + blockIdx.z * blockDim.z; int g = p + Np * step; if(i < NX && j < NY && p < Np && g < Ng) { float v = 1500.f * sqrtf(1.f + f(i, j)); float r = v * DT / HX; float s = 2.f - 4.f * r * r; float val; // will hold new u at (i, j, k + 1) // not at boundary if (i != 0 && i != NX - 1 && j != 0 && j != NY - 1) { val = r * r * (u(i+1, j, k, p) + u(i-1, j, k, p) + u(i, j-1, k, p) + u(i, j+1, k, p)) + s * u(i, j, k, p) - u(i, j, k-1, p); int sensor_idx = g * group_size; int jp1 = jj(sensor_idx); int jp2 = jj(sensor_idx + group_size - 1); int ip1 = ii(sensor_idx); int ip2 = ii(sensor_idx + group_size - 1); minmax(jp1, jp2); minmax(ip1, ip2); // at sensor, k <= 24 if (j + 1 >= jp1 && j + 1 <= jp2 && i + 1 >= ip1 && i + 1 <= ip2 && k + 1 <= 24) { float t = k * DT - TT; // add wave value val += v * v * DT * DT * cosf(OMEGAC * t) * expf(-(t * t) / (2.f * TAO * TAO)); } } // at boundary else { // boundary booleans bool top = (j == 0); bool bottom = (j == NY - 1); bool left = (i == 0); bool right = (i == NX - 1); // index variables for different boundary cases int ja = top ? (j + 1) : bottom ? (j - 1) : j; int jb = top ? (j + 2) : bottom ? (j - 2) : j; int ia = left ? (i + 1) : right ? (i - 1) : i; int ib = left ? (i + 2) : right ? 
(i - 2) : i; val = (2.f - 2.f * r - r * r) * u(i, j, k, p) + 2.f * r * (1.f + r) * u(ia, ja, k, p) - r * r * u(ib, jb, k, p) + (2.f * r - 1.f) * u(i, j, k-1, p) - 2.f * r * u(ia, ja, k-1, p); } u(i, j, k+1, p) = val; } } __global__ void propagation_at_corners( kernel_ptr<float> u, int step, int Np, int Ng) { int k = threadIdx.x + blockIdx.x * blockDim.x; int p = threadIdx.y + blockIdx.y * blockDim.y; int g = p + Np * step; if (k < NT && g < Ng && p < Np) { u(0, 0, k, p) = 1.f / 2.f * (u(0, 1, k, p) + u(1, 0, k, p)); u(NX-1, 0, k, p) = 1.f / 2.f * (u(NX-2, 0, k, p) + u(NX-1, 1, k, p)); u(0, NY-1, k, p) = 1.f / 2.f * (u(0, NY-2, k, p) + u(1, NY-1, k, p)); u(NX-1, NY-1, k, p) = 1.f / 2.f * (u(NX-2, NY-1, k, p) + u(NX-1, NY-2, k, p)); } } __global__ void difference_signal( kernel_ptr<float> const u, kernel_ptr<float> const g_bottom, kernel_ptr<float> const g_right, kernel_ptr<float> const g_top, kernel_ptr<float> const g_left, kernel_ptr<float> rr_bottom, kernel_ptr<float> rr_right, kernel_ptr<float> rr_top, kernel_ptr<float> rr_left, int step, int Np, int Ng) { int i = threadIdx.x + blockIdx.x * blockDim.x; int k = threadIdx.y + blockIdx.y * blockDim.y; int p = threadIdx.z + blockIdx.z * blockDim.z; int g = p + Np * step; if (i > 20 && i < 180 && k > 1 && k < NT && g < Ng && p < Np) { // store difference at time k of original signal // and current signal at bottom sensor row rr_bottom(i, k, g) = g_bottom(i, k, g) - u(i, 180, k, p); // store difference at time k of original signal // and current signal at top sensor row rr_top(i, k, g) = g_top(i, k, g) - u(i, 20, k, p); // store difference at time k of original signal // and current signal at right sensor column rr_right(i, k, g) = g_right(i, k, g) - u(180, i, k, p); // store difference at time k of original signal // and current signal at left sensor column rr_left(i, k, g) = g_left(i, k, g) - u(20, i, k, p); } } __global__ void backpropagation1( kernel_ptr<float> z, kernel_ptr<float> const f, int k, int step, int Np, int Ng) { // Map from threadIdx / BlockIdx to pixel position int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int p = threadIdx.z + blockIdx.z * blockDim.z; int g = p + Np * step; if(i >= 1 && i < (NX - 1) && j >= 1 && j < (NY - 1) && g < Ng && p < Np) { z(i, j, k, p) = 1500.f * 1500.f * (DT * DT) * ((1.f + f(i, j-1)) * z(i, j-1, k+1, p) + (1.f + f(i, j+1)) * z(i, j+1, k+1, p) + (1.f + f(i-1, j)) * z(i-1, j, k+1, p) + (1.f + f(i+1, j)) * z(i+1, j, k+1, p) - 4.f * (1.f + f(i, j)) * z(i, j, k+1, p)) / (H * H) + 2.f * z(i, j, k+1, p) - z(i, j, k+2, p); } } __global__ void backpropagation2( kernel_ptr<float> z, kernel_ptr<float> const rr_bottom, kernel_ptr<float> const rr_right, kernel_ptr<float> const rr_top, kernel_ptr<float> const rr_left, int k, int step, int Np, int Ng) { int i = threadIdx.x + blockIdx.x * blockDim.x; int p = threadIdx.y + blockIdx.y * blockDim.y; int g = p + Np * step; if (g < Ng && p < Np) { if(i >= 21 && i < 180) { z(i, 180, k, p) = z(i, 179, k, p) + rr_bottom(i, k, g) * H * 1000.f; z(i, 20, k, p) = z(i, 21, k, p) + rr_top(i, k, g) * H * 1000.f; z(180, i, k, p) = z(179, i, k, p) + rr_right(i, k, g) * H * 1000.f; z(20, i, k, p) = z(21, i, k, p) + rr_left(i, k, g) * H * 1000.f; } if (i >= 1 && i < (NX - 1)) { z(i, 0, k, p) = z(i, 1, k, p); z(i, NY-1, k, p) = z(i, NY-2, k, p); z(0, i, k, p) = z(1, i, k, p); z(NX-1, i, k, p) = z(NX-2, i, k, p); } else if (i == 0) { z(0, 0, k, p) = (z(1, 0, k, p) + z(0, 1, k, p)) / 2.f; z(NX-1, 0, k, p) = (z(NX-2, 0, k, p) + z(NX-1, 1, 
k, p)) / 2.f; z(0, NY-1, k, p) = (z(1, NY-1, k, p) + z(0, NY-2, k, p)) / 2.f; z(NX-1, NY-1, k, p) = (z(NX-2, NY-1, k, p) + z(NX-1, NY-2, k, p)) / 2.f; } } } __global__ void laplace( kernel_ptr<float> const u, kernel_ptr<float> Lu, int step, int Np, int Ng) { // Map from threadIdx / BlockIdx to pixel position int tx = threadIdx.x + blockIdx.x * blockDim.x; int k = threadIdx.y + blockIdx.y * blockDim.y; int p = threadIdx.z + blockIdx.z * blockDim.z; int g = p + Np * step; if (tx < (NX * NY) && (k + 1) < NT && g < Ng && p < Np) { int i = tx % NX; int j = tx / NX; int ja = (j > 0) ? (j - 1) : j; int jb = (j < NY - 1) ? (j + 1) : j; int ia = (i > 0) ? (i - 1) : i; int ib = (i < NX - 1) ? (i + 1) : i; Lu(i, j, k+1, p) = (u(i, ja, k+1, p) + u(i, jb, k+1, p) + u(ia, j, k+1, p) + u(ib, j, k+1, p) - 4.f * u(i, j, k+1, p)) / (H * H); } } __global__ void laplace_corners( kernel_ptr<float> const u, kernel_ptr<float> Lu, int step, int Np, int Ng) { int k = threadIdx.x + blockIdx.x * blockDim.x; int p = threadIdx.y + blockIdx.y * blockDim.y; int g = p + Np * step; if ((k + 1) < NT && g < Ng && p < Np) { Lu(0, 0, k+1, p) = (Lu(1, 0, k+1, p) + Lu(0, 1, k+1, p)) / 2.f; Lu(NX-1, 0, k+1, p) = (Lu(NX-2, 0, k+1, p) + Lu(NX-1, 1, k+1, p)) / 2.f; Lu(0, NY-1, k+1, p) = (Lu(1, NY-1, k+1, p) + Lu(0, NY-2, k+1, p)) / 2.f; Lu(NX-1, NY-1, k+1, p) = (Lu(NX-2, NY-1, k+1, p) + Lu(NX-1, NY-2, k+1, p)) / 2.f; } } __global__ void update_differential( kernel_ptr<float> df, kernel_ptr<float> df_avg, kernel_ptr<float> const z, kernel_ptr<float> const Lu, kernel_ptr<float> const f, int step, int Np, int Ng) { // Map from threadIdx / BlockIdx to pixel position int tx = threadIdx.x + blockIdx.x * blockDim.x; int k = threadIdx.y + blockIdx.y * blockDim.y; int p = threadIdx.z + blockIdx.z * blockDim.z; int g = p + Np * step; if (tx < (NX * NY) && (k + 1) < NT && g < Ng && p < Np) { int i = tx % NX; int j = tx / NX; float val = z(i, j, k+1, p) * Lu(i, j, k+1, p) / (1.f + f(i, j)); atomicAdd(&df(i, j, g), val); atomicAdd(&df_avg(i, j), val); } } __global__ void update_field( kernel_ptr<float> f, kernel_ptr<float> const df_avg, kernel_ptr<float> f_minus_fo, kernel_ptr<float> const fo, float scale, int Ng) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < NX && j < NY) { bool in_sensor_field = (i >= 21) && (i < 180) && (j >= 21) && (j < 180); if (in_sensor_field) f(i, j) += scale * df_avg(i, j); f_minus_fo(i, j) = f(i, j) - fo(i, j); } } /**********INLINE FUNCTION DEFINITIONS**********/ inline int grid_size(int n, int threads) { return ceil(float(n) / threads); } // POST-CONDITION: a <= b template <typename T> __host__ __device__ void minmax(T &a, T &b) { if (a > b) { int t = a; a = b; b = t; } }
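// The .hip and .cu versions of the tomography code above differ mainly in launch syntax:
// the CUDA file uses triple-chevron launches, while the hipified file wraps the same calls
// in hipLaunchKernelGGL, whose fourth and fifth arguments are the dynamic shared-memory
// size and the stream. A minimal sketch of the two forms side by side; scale_kernel and
// its arguments are illustrative, not taken from the tomography code.
__global__ void scale_kernel( float *x, float a, int n )
{
   int i = threadIdx.x + blockIdx.x * blockDim.x;
   if (i < n) x[i] *= a;
}

void launch_both_ways( float *d_x, float a, int n )
{
   dim3 threads(256);
   dim3 grid( (n + threads.x - 1) / threads.x );   // integer form of the grid_size() helper

   // CUDA form (as in the .cu file above):
   //    scale_kernel<<<grid, threads>>>(d_x, a, n);
   //
   // HIP form (as in the .hip file above); the trailing 0, 0 are shared-memory bytes and stream:
   //    hipLaunchKernelGGL(scale_kernel, grid, threads, 0, 0, d_x, a, n);

   scale_kernel<<< grid, threads, 0, 0 >>>( d_x, a, n );
}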
ac6148d95dbb2d9f92b7b5080b98655326205387.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <cutil.h> #include <gpuewwvpot_kernel.cu> //////////////////////////////////////////////////////////////////////////////// extern "C" void gpuewwvpot_ (double*, int, double*, int, double*, double *); void gpuewwvpot__ (double*, int, double*, int, double*, double *); extern "C" void gpuewwvpot_ (double* xq, int num_atm, double* g_vec, int num_k, double* force, double *tpot) { #ifndef CUDA_SDK_2 CUT_DEVICE_INIT(); #endif // CUT_CHECK_DEVICE(); // double tpot[1]; unsigned int size_A = ((num_atm+THD-1)/THD*THD) * 4; unsigned int mem_size_A = sizeof(float) * size_A; float* xq_float = (float*) malloc(mem_size_A); unsigned int size_B = ((num_k+THD-1)/THD*THD) * 4; unsigned int mem_size_B = sizeof(float) * size_B; float* gv_float = (float*) malloc(mem_size_B); unsigned int size_C = ((num_k+THD-1)/THD*THD) * 3; unsigned int mem_size_C = sizeof(float) * size_C; float* pot_float = (float*) malloc(mem_size_C); unsigned int size_D = ((num_atm+THD-1)/THD*THD) * 3; unsigned int mem_size_D = sizeof(float) * size_D; float* f_float = (float*) malloc(mem_size_D); // unsigned int mem_size_E = sizeof(double) * size_D; // double* force_double = (double*) malloc(mem_size_E); //double stime,ltime; for (int i = 0; i < size_A ; i++){ if(i<num_atm*4) xq_float[i] = (float)xq[i]; else xq_float[i] = 0.0f; } for (int i = 0; i < size_B; i++){ if(i<num_k*4) gv_float[i] = (float)g_vec[i]; else gv_float[i] = 0.0f; //printf("%16.6f %d \n",gv_float[i],i); } /* for (int i = 0; i < num_k; i++) { printf("%16.6f %d \n",gv_float[i*4+3],i); }*/ for (int i = 0; i < size_D; i++) { f_float[i] = 0.e0; // force_double[i] = 0.e0; } //get_cputime(&ltime,&stime); float* d_A; CUDA_SAFE_CALL(hipMalloc((void**) &d_A, mem_size_A)); CUDA_SAFE_CALL(hipMemcpy(d_A, xq_float, mem_size_A,hipMemcpyHostToDevice) ); float* d_B; CUDA_SAFE_CALL(hipMalloc((void**) &d_B, mem_size_B)); CUDA_SAFE_CALL(hipMemcpy(d_B, gv_float, mem_size_B,hipMemcpyHostToDevice) ); float* d_C; CUDA_SAFE_CALL(hipMalloc((void**) &d_C, mem_size_C)); float* d_D; CUDA_SAFE_CALL(hipMalloc((void**) &d_D, mem_size_D)); dim3 threads(THD); dim3 grid((num_k+THD-1) / THD); hipLaunchKernelGGL(( gpuewwvpot_kernel), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, num_atm); CUT_CHECK_ERROR("Kernel execution failed"); #if 1 CUDA_SAFE_CALL(hipMemcpy(pot_float, d_C, mem_size_C,hipMemcpyDeviceToHost) ); *tpot=0.0; for(int i=0;i<num_k;i++) *tpot+=pot_float[i*3+2]; (*tpot)*=0.5; // printf("tpot=%e 0:%f\n",*tpot); #endif dim3 grid2((num_atm+THD-1) / THD); hipLaunchKernelGGL(( gpuewwvpot_kernel2), dim3(grid2), dim3(threads) , 0, 0, d_C, d_B, d_A, d_D, num_k); CUT_CHECK_ERROR("Kernel execution failed"); CUDA_SAFE_CALL(hipMemcpy(f_float, d_D, mem_size_D,hipMemcpyDeviceToHost) ); //get_cputime(&ltime,&stime); //printf("GPU Processing time: %10.3f (sec)\n", stime); // host cumputation //////////////////////////////////////// // get_cputime(&ltime,&stime); // computeGold_d(force_double, g_vec, xq, num_atm, num_k); // get_cputime(&ltime,&stime); // printf("HOST Processing time: %10.3f (sec)\n", stime); // post preccess //////////////////////////////////////////// //double err; for (int i = 0; i < num_atm; ++i){ //printf("%16.6f %16.6f %d \n",f_float[i*3],force_double[i*3],i); force[i*3] = (double)f_float[i*3]; force[i*3+1] = (double)f_float[i*3+1]; force[i*3+2] = (double)f_float[i*3+2]; /* err += 
fabs((force_double[i*3] - (double)f_float[i*3]) / force_double[i*3]); err += fabs((force_double[i*3+1] - (double)f_float[i*3+1]) / force_double[i*3+1]); err += fabs((force_double[i*3+2] - (double)f_float[i*3+2]) / force_double[i*3+2]);*/ /* force[i*3] = force_double[i*3]; force[i*3+1] = force_double[i*3+1]; force[i*3+2] = force_double[i*3+2]; */ } //err = err / (3.e0 * (double)num_atm) * 100.e0; //printf("err : %20.8f \n",err); //printf("GPU : %20.8f \n",sum_gpu); //printf("GPU : %20.8f \n",force[0]); //printf("HOST: %20.8f \n",force_double[0]); CUDA_SAFE_CALL(hipFree(d_A)); CUDA_SAFE_CALL(hipFree(d_B)); CUDA_SAFE_CALL(hipFree(d_C)); CUDA_SAFE_CALL(hipFree(d_D)); free(xq_float); free(gv_float); free(f_float); free(pot_float); //free(force_double); } extern "C" void gpuewwvpot__ (double* xq, int* num_atm, double* g_vec, int* num_k, double* force, double *tpot) { gpuewwvpot_ (xq,*num_atm,g_vec,*num_k,force,tpot); }
ac6148d95dbb2d9f92b7b5080b98655326205387.cu
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <cutil.h> #include <gpuewwvpot_kernel.cu> //////////////////////////////////////////////////////////////////////////////// extern "C" void gpuewwvpot_ (double*, int, double*, int, double*, double *); void gpuewwvpot__ (double*, int, double*, int, double*, double *); extern "C" void gpuewwvpot_ (double* xq, int num_atm, double* g_vec, int num_k, double* force, double *tpot) { #ifndef CUDA_SDK_2 CUT_DEVICE_INIT(); #endif // CUT_CHECK_DEVICE(); // double tpot[1]; unsigned int size_A = ((num_atm+THD-1)/THD*THD) * 4; unsigned int mem_size_A = sizeof(float) * size_A; float* xq_float = (float*) malloc(mem_size_A); unsigned int size_B = ((num_k+THD-1)/THD*THD) * 4; unsigned int mem_size_B = sizeof(float) * size_B; float* gv_float = (float*) malloc(mem_size_B); unsigned int size_C = ((num_k+THD-1)/THD*THD) * 3; unsigned int mem_size_C = sizeof(float) * size_C; float* pot_float = (float*) malloc(mem_size_C); unsigned int size_D = ((num_atm+THD-1)/THD*THD) * 3; unsigned int mem_size_D = sizeof(float) * size_D; float* f_float = (float*) malloc(mem_size_D); // unsigned int mem_size_E = sizeof(double) * size_D; // double* force_double = (double*) malloc(mem_size_E); //double stime,ltime; for (int i = 0; i < size_A ; i++){ if(i<num_atm*4) xq_float[i] = (float)xq[i]; else xq_float[i] = 0.0f; } for (int i = 0; i < size_B; i++){ if(i<num_k*4) gv_float[i] = (float)g_vec[i]; else gv_float[i] = 0.0f; //printf("%16.6f %d \n",gv_float[i],i); } /* for (int i = 0; i < num_k; i++) { printf("%16.6f %d \n",gv_float[i*4+3],i); }*/ for (int i = 0; i < size_D; i++) { f_float[i] = 0.e0; // force_double[i] = 0.e0; } //get_cputime(&ltime,&stime); float* d_A; CUDA_SAFE_CALL(cudaMalloc((void**) &d_A, mem_size_A)); CUDA_SAFE_CALL(cudaMemcpy(d_A, xq_float, mem_size_A,cudaMemcpyHostToDevice) ); float* d_B; CUDA_SAFE_CALL(cudaMalloc((void**) &d_B, mem_size_B)); CUDA_SAFE_CALL(cudaMemcpy(d_B, gv_float, mem_size_B,cudaMemcpyHostToDevice) ); float* d_C; CUDA_SAFE_CALL(cudaMalloc((void**) &d_C, mem_size_C)); float* d_D; CUDA_SAFE_CALL(cudaMalloc((void**) &d_D, mem_size_D)); dim3 threads(THD); dim3 grid((num_k+THD-1) / THD); gpuewwvpot_kernel<<< grid, threads >>>(d_C, d_A, d_B, num_atm); CUT_CHECK_ERROR("Kernel execution failed"); #if 1 CUDA_SAFE_CALL(cudaMemcpy(pot_float, d_C, mem_size_C,cudaMemcpyDeviceToHost) ); *tpot=0.0; for(int i=0;i<num_k;i++) *tpot+=pot_float[i*3+2]; (*tpot)*=0.5; // printf("tpot=%e 0:%f\n",*tpot); #endif dim3 grid2((num_atm+THD-1) / THD); gpuewwvpot_kernel2<<< grid2, threads >>>(d_C, d_B, d_A, d_D, num_k); CUT_CHECK_ERROR("Kernel execution failed"); CUDA_SAFE_CALL(cudaMemcpy(f_float, d_D, mem_size_D,cudaMemcpyDeviceToHost) ); //get_cputime(&ltime,&stime); //printf("GPU Processing time: %10.3f (sec)\n", stime); // host cumputation //////////////////////////////////////// // get_cputime(&ltime,&stime); // computeGold_d(force_double, g_vec, xq, num_atm, num_k); // get_cputime(&ltime,&stime); // printf("HOST Processing time: %10.3f (sec)\n", stime); // post preccess //////////////////////////////////////////// //double err; for (int i = 0; i < num_atm; ++i){ //printf("%16.6f %16.6f %d \n",f_float[i*3],force_double[i*3],i); force[i*3] = (double)f_float[i*3]; force[i*3+1] = (double)f_float[i*3+1]; force[i*3+2] = (double)f_float[i*3+2]; /* err += fabs((force_double[i*3] - (double)f_float[i*3]) / force_double[i*3]); err += fabs((force_double[i*3+1] - (double)f_float[i*3+1]) / force_double[i*3+1]); err += 
fabs((force_double[i*3+2] - (double)f_float[i*3+2]) / force_double[i*3+2]);*/ /* force[i*3] = force_double[i*3]; force[i*3+1] = force_double[i*3+1]; force[i*3+2] = force_double[i*3+2]; */ } //err = err / (3.e0 * (double)num_atm) * 100.e0; //printf("err : %20.8f \n",err); //printf("GPU : %20.8f \n",sum_gpu); //printf("GPU : %20.8f \n",force[0]); //printf("HOST: %20.8f \n",force_double[0]); CUDA_SAFE_CALL(cudaFree(d_A)); CUDA_SAFE_CALL(cudaFree(d_B)); CUDA_SAFE_CALL(cudaFree(d_C)); CUDA_SAFE_CALL(cudaFree(d_D)); free(xq_float); free(gv_float); free(f_float); free(pot_float); //free(force_double); } extern "C" void gpuewwvpot__ (double* xq, int* num_atm, double* g_vec, int* num_k, double* force, double *tpot) { gpuewwvpot_ (xq,*num_atm,g_vec,*num_k,force,tpot); }
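// The file above relies on CUT_DEVICE_INIT, CUDA_SAFE_CALL, and CUT_CHECK_ERROR from
// cutil.h, a helper header that shipped with the legacy CUDA SDK samples and is no longer
// distributed. A minimal replacement sketch using only the runtime API follows; the macro
// name CUDA_CHECK is an illustrative choice, not an NVIDIA API. Kernel launches return no
// status, so they are checked with cudaGetLastError() right after the launch.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                      \
   do {                                                                       \
      cudaError_t err__ = (call);                                             \
      if (err__ != cudaSuccess) {                                             \
         fprintf(stderr, "CUDA error %s at %s:%d\n",                          \
                 cudaGetErrorString(err__), __FILE__, __LINE__);              \
         exit(EXIT_FAILURE);                                                  \
      }                                                                       \
   } while (0)

// usage, mirroring the calls in the file above:
//    CUDA_CHECK( cudaMalloc((void**)&d_A, mem_size_A) );
//    CUDA_CHECK( cudaMemcpy(d_A, xq_float, mem_size_A, cudaMemcpyHostToDevice) );
//    kernel<<<grid, threads>>>(...);
//    CUDA_CHECK( cudaGetLastError() );      // replaces CUT_CHECK_ERROR("...")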
3a81d610bc53cfda77b40942bcf20274c850e57e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <bits/stdc++.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> using namespace std; const int MAX_FES = 50000; int NL = 10; int LS = 4; int dim1 = 20; int dim2 = 1000000; const int num_points = NL*LS; const double phi = 0.1; const double tol = 0.000001; struct coor { double a0,a1,a2,a3,a4,a5,a6; }; struct coor1 { double* a1; double a2; double a3; double* a4; double a5; double a0; }; __device__ void func_eval_input_data(coor1* d_coo, double* y, double* sum) { // int tId = threadIdx.x + (blockIdx.x * blockDim.x); double temp = (*y); *sum *= temp; } __host__ __device__ void func_eval_input_data(coor1* d_coo, double* x, double* y, double* total) { double* d_sum, *sum; sum = (double *) malloc(sizeof(double)); *sum = 1; // hipMalloc((void **)&sum,sizeof(double)); // int tId = threadIdx.x + (blockIdx.x * blockDim.x); // func_eval_input_data<<<(*x)/512,512>>>(d_coo, y, d_sum); *total += *sum; free(sum); } __host__ __device__ double eval(double x, double y) { // Function to be evaluated. Can be changed return sin(x)*cos(y); } /* bool comp(coor temp1, coor temp2) { // Comparator function for std::sort() return temp1.a2 < temp2.a2; } */ __host__ __device__ bool operator < (const coor& a,const coor& b){return a.a2 < b.a2;} __global__ void noicetoit(coor* h_coo, int* levels, double* best_x, double* best_y, double* best_fx, int* i, int* LS, int* NL) { int tId = threadIdx.x + (blockIdx.x * blockDim.x); hiprandState_t state; hiprand_init((unsigned long long)clock() + tId, 0, 0, &state); int index = threadIdx.x + blockIdx.x * blockDim.x; double rand1 = hiprand_uniform_double(&state); double rand2 = hiprand_uniform_double(&state); double rand3 = hiprand_uniform_double(&state); double rand4 = hiprand_uniform_double(&state); double rand5 = hiprand_uniform_double(&state); double rand6 = hiprand_uniform_double(&state); double rand7 = hiprand_uniform_double(&state); int pt = levels[(*i-1)*(*NL) + index]; // Choosing a random level int lev1 = (int)rand1 % (*i); int lev2 = (int)rand2 % (*i); if(lev2 < lev1) { int temp = lev2; lev2 = lev1; lev1 = temp; } // Choosing random points from those levels int pt1 = (int)rand3 % (*LS); int pt2 = (int)rand4 % (*LS); int temp1 = levels[lev1*(*NL) + pt1]; int temp2 = levels[lev2*(*NL) + pt2]; int r1 = ((double) rand5 / (RAND_MAX)); int r2 = ((double) rand6 / (RAND_MAX)); int r3 = ((double) rand7 / (RAND_MAX)); // Update Functions h_coo[pt].a4 = r1*(h_coo[pt].a4) + r2*((h_coo[temp1].a0) - (h_coo[pt].a0)) + phi*r3*((h_coo[temp2].a0) - (h_coo[pt].a0)); h_coo[pt].a5 = r1*h_coo[pt].a5 + r2*(h_coo[temp1].a1 - h_coo[pt].a1) + phi*r3*(h_coo[temp2].a1 - h_coo[pt].a1); h_coo[pt].a0 = h_coo[pt].a0 + h_coo[pt].a4; h_coo[pt].a1 = h_coo[pt].a1 + h_coo[pt].a5; double fx = eval(h_coo[pt].a0, h_coo[pt].a1); if(abs(fx - *best_fx) < tol) { *best_x = h_coo[pt].a0; *best_y = h_coo[pt].a1; *best_fx = fx; } h_coo[pt].a2 = fx; } int main() { clock_t tStart = clock(); int fes = 0; // Domain for coordinates int coor_low_lim = -10; int coor_high_lim = 50; // Domain for velocities double vel_low_lim = -0.1; double vel_high_lim = 0.1; double best_x; double best_y; double best_fx; //parallel programming coor* h_coo, *d_coo; h_coo = (coor *)malloc(num_points*sizeof(coor)); coor1* h_coo1, *d_coo1; h_coo1 = (coor1 *)malloc(num_points*sizeof(coor)); 
for(int i=0; i<num_points; i++) { h_coo1[i].a1 = (double *)malloc(dim1*sizeof(double)); h_coo1[i].a4 = (double *)malloc(dim1*sizeof(double)); } hipMalloc((void **)&d_coo, num_points*sizeof(coor)); int *levels, *d_levels; levels = (int *)malloc(sizeof(int)*NL*LS); hipMalloc((void **)&d_levels, sizeof(int)*NL*LS); // vector<vector<int> > levels(NL, vector<int> (LS, 0)); // Seeding random srand(static_cast <unsigned> (time(0))); for(int i=0; i< num_points; i++) { /* coor[i][...] contains property of each point. coor[i][0] -> x-coordinate coor[i][1] -> y-coordinate coor[i][2] -> function evaluation coor[i][3] -> level number coor[i][4] -> x-velocity coor[i][5] -> y-velocity */ h_coo[i].a0 = (coor_low_lim + static_cast <double> (rand()) /( static_cast <double> (RAND_MAX/(coor_high_lim - coor_low_lim)))); h_coo[i].a1 = (coor_low_lim + static_cast <double> (rand()) /( static_cast <double> (RAND_MAX/(coor_high_lim - coor_low_lim)))); h_coo[i].a2 = eval(h_coo[i].a0, h_coo[i].a1); h_coo[i].a4 = (vel_low_lim + static_cast <double> (rand()) /( static_cast <double> (RAND_MAX/(vel_high_lim - vel_low_lim)))); h_coo[i].a5 = (vel_low_lim + static_cast <double> (rand()) /( static_cast <double> (RAND_MAX/(vel_high_lim - vel_low_lim)))); } fes += num_points; hipMemcpy(d_coo, h_coo, num_points*sizeof(coor), hipMemcpyHostToDevice); while(fes < MAX_FES) { thrust::sort(h_coo, h_coo + num_points); best_fx = h_coo[0].a2; best_x = h_coo[0].a0; best_y = h_coo[0].a1; // Segregating points into levels for(int i=0; i<num_points; i++) { /* Levels basically acts as a lookup for coor array. Dimensions of levels: levels[NL][LS] levels[i] denotes (i+1)th level and each element in levels[i][...] denotes the number of the point in the coor array. For instance, levels[1][2] = 5 denotes that the 3rd point in 2nd level corresponds to point 5 in coor array, i.e. coor[5][...] 
*/ h_coo[i].a3 = i/LS; levels[(i/LS)*NL + i%LS] = i; } hipMemcpy(d_coo, h_coo, num_points*sizeof(coor), hipMemcpyHostToDevice); double *d_best_x, *d_best_y, *d_best_fx; int* d_LS, *d_i, *d_NL; hipMalloc((void **)&d_best_x, sizeof(double)); hipMalloc((void **)&d_best_y, sizeof(double)); hipMalloc((void **)&d_best_fx, sizeof(double)); hipMalloc((void **)&d_i, sizeof(int)); hipMalloc((void **)&d_LS, sizeof(int)); hipMalloc((void **)&d_NL, sizeof(int)); hipMemcpy(d_best_x, &best_x, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_best_y, &best_y, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_best_fx, &best_fx, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_LS, &LS, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_NL, &NL, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_levels, levels, sizeof(int)*NL*LS, hipMemcpyHostToDevice); for(int i=NL; i>=3; i--) { hipMemcpy(d_i, &i, sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( noicetoit), dim3(LS/512),dim3(512), 0, 0, d_coo,d_levels, d_best_x, d_best_y, d_best_fx, d_i, d_LS, d_NL); fes+= LS; } // hipMemcpy(h_coo, d_coo, num_points*sizeof(coor), hipMemcpyDeviceToHost); for(int i=0; i<LS; i++) { int pt1 = 0 + (rand() % static_cast<int>(LS)); int pt2 = 0 + (rand() % static_cast<int>(LS)); int pt = levels[1*NL + i]; int temp1 = levels[0*NL + pt1]; int temp2 = levels[0*NL + pt2]; if(abs(eval(h_coo[temp2].a0, h_coo[temp2].a1) - eval(h_coo[temp1].a0, h_coo[temp1].a1)) < tol) { swap(temp1, temp2); } int r1 = ((double) rand() / (RAND_MAX)); int r2 = ((double) rand() / (RAND_MAX)); int r3 = ((double) rand() / (RAND_MAX)); // Update Functions h_coo[pt].a4 = r1*h_coo[pt].a4 + r2*(h_coo[temp1].a0 - h_coo[pt].a0) + phi*r3*(h_coo[temp2].a0 - h_coo[pt].a0); h_coo[pt].a5 = r1*h_coo[pt].a5 + r2*(h_coo[temp1].a1 - h_coo[pt].a1) + phi*r3*(h_coo[temp2].a1 - h_coo[pt].a1); h_coo[pt].a0 = h_coo[pt].a0 + h_coo[pt].a4; h_coo[pt].a1 = h_coo[pt].a1 + h_coo[pt].a5; double fx = eval(h_coo[pt].a0, h_coo[pt].a1); if(abs(fx - best_fx) < tol) { best_x = h_coo[pt].a0; best_y = h_coo[pt].a1; best_fx = fx; } h_coo[pt].a2 = fx; } fes+= LS; } cout << "Time: " << (double)(clock() - tStart)/CLOCKS_PER_SEC << endl; cout << "FINAL RESULTS: " << endl; cout << "Best x: " << best_x << endl; cout << "Best y: " << best_y << endl; // cout << "Best evaluation: " << best_fx << endl; return 0; }
3a81d610bc53cfda77b40942bcf20274c850e57e.cu
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <bits/stdc++.h> #include <curand.h> #include <curand_kernel.h> using namespace std; const int MAX_FES = 50000; int NL = 10; int LS = 4; int dim1 = 20; int dim2 = 1000000; const int num_points = NL*LS; const double phi = 0.1; const double tol = 0.000001; struct coor { double a0,a1,a2,a3,a4,a5,a6; }; struct coor1 { double* a1; double a2; double a3; double* a4; double a5; double a0; }; __device__ void func_eval_input_data(coor1* d_coo, double* y, double* sum) { // int tId = threadIdx.x + (blockIdx.x * blockDim.x); double temp = (*y); *sum *= temp; } __host__ __device__ void func_eval_input_data(coor1* d_coo, double* x, double* y, double* total) { double* d_sum, *sum; sum = (double *) malloc(sizeof(double)); *sum = 1; // cudaMalloc((void **)&sum,sizeof(double)); // int tId = threadIdx.x + (blockIdx.x * blockDim.x); // func_eval_input_data<<<(*x)/512,512>>>(d_coo, y, d_sum); *total += *sum; free(sum); } __host__ __device__ double eval(double x, double y) { // Function to be evaluated. Can be changed return sin(x)*cos(y); } /* bool comp(coor temp1, coor temp2) { // Comparator function for std::sort() return temp1.a2 < temp2.a2; } */ __host__ __device__ bool operator < (const coor& a,const coor& b){return a.a2 < b.a2;} __global__ void noicetoit(coor* h_coo, int* levels, double* best_x, double* best_y, double* best_fx, int* i, int* LS, int* NL) { int tId = threadIdx.x + (blockIdx.x * blockDim.x); curandState state; curand_init((unsigned long long)clock() + tId, 0, 0, &state); int index = threadIdx.x + blockIdx.x * blockDim.x; double rand1 = curand_uniform_double(&state); double rand2 = curand_uniform_double(&state); double rand3 = curand_uniform_double(&state); double rand4 = curand_uniform_double(&state); double rand5 = curand_uniform_double(&state); double rand6 = curand_uniform_double(&state); double rand7 = curand_uniform_double(&state); int pt = levels[(*i-1)*(*NL) + index]; // Choosing a random level int lev1 = (int)rand1 % (*i); int lev2 = (int)rand2 % (*i); if(lev2 < lev1) { int temp = lev2; lev2 = lev1; lev1 = temp; } // Choosing random points from those levels int pt1 = (int)rand3 % (*LS); int pt2 = (int)rand4 % (*LS); int temp1 = levels[lev1*(*NL) + pt1]; int temp2 = levels[lev2*(*NL) + pt2]; int r1 = ((double) rand5 / (RAND_MAX)); int r2 = ((double) rand6 / (RAND_MAX)); int r3 = ((double) rand7 / (RAND_MAX)); // Update Functions h_coo[pt].a4 = r1*(h_coo[pt].a4) + r2*((h_coo[temp1].a0) - (h_coo[pt].a0)) + phi*r3*((h_coo[temp2].a0) - (h_coo[pt].a0)); h_coo[pt].a5 = r1*h_coo[pt].a5 + r2*(h_coo[temp1].a1 - h_coo[pt].a1) + phi*r3*(h_coo[temp2].a1 - h_coo[pt].a1); h_coo[pt].a0 = h_coo[pt].a0 + h_coo[pt].a4; h_coo[pt].a1 = h_coo[pt].a1 + h_coo[pt].a5; double fx = eval(h_coo[pt].a0, h_coo[pt].a1); if(abs(fx - *best_fx) < tol) { *best_x = h_coo[pt].a0; *best_y = h_coo[pt].a1; *best_fx = fx; } h_coo[pt].a2 = fx; } int main() { clock_t tStart = clock(); int fes = 0; // Domain for coordinates int coor_low_lim = -10; int coor_high_lim = 50; // Domain for velocities double vel_low_lim = -0.1; double vel_high_lim = 0.1; double best_x; double best_y; double best_fx; //parallel programming coor* h_coo, *d_coo; h_coo = (coor *)malloc(num_points*sizeof(coor)); coor1* h_coo1, *d_coo1; h_coo1 = (coor1 *)malloc(num_points*sizeof(coor)); for(int i=0; i<num_points; i++) { h_coo1[i].a1 = (double *)malloc(dim1*sizeof(double)); h_coo1[i].a4 = (double 
*)malloc(dim1*sizeof(double)); } cudaMalloc((void **)&d_coo, num_points*sizeof(coor)); int *levels, *d_levels; levels = (int *)malloc(sizeof(int)*NL*LS); cudaMalloc((void **)&d_levels, sizeof(int)*NL*LS); // vector<vector<int> > levels(NL, vector<int> (LS, 0)); // Seeding random srand(static_cast <unsigned> (time(0))); for(int i=0; i< num_points; i++) { /* coor[i][...] contains property of each point. coor[i][0] -> x-coordinate coor[i][1] -> y-coordinate coor[i][2] -> function evaluation coor[i][3] -> level number coor[i][4] -> x-velocity coor[i][5] -> y-velocity */ h_coo[i].a0 = (coor_low_lim + static_cast <double> (rand()) /( static_cast <double> (RAND_MAX/(coor_high_lim - coor_low_lim)))); h_coo[i].a1 = (coor_low_lim + static_cast <double> (rand()) /( static_cast <double> (RAND_MAX/(coor_high_lim - coor_low_lim)))); h_coo[i].a2 = eval(h_coo[i].a0, h_coo[i].a1); h_coo[i].a4 = (vel_low_lim + static_cast <double> (rand()) /( static_cast <double> (RAND_MAX/(vel_high_lim - vel_low_lim)))); h_coo[i].a5 = (vel_low_lim + static_cast <double> (rand()) /( static_cast <double> (RAND_MAX/(vel_high_lim - vel_low_lim)))); } fes += num_points; cudaMemcpy(d_coo, h_coo, num_points*sizeof(coor), cudaMemcpyHostToDevice); while(fes < MAX_FES) { thrust::sort(h_coo, h_coo + num_points); best_fx = h_coo[0].a2; best_x = h_coo[0].a0; best_y = h_coo[0].a1; // Segregating points into levels for(int i=0; i<num_points; i++) { /* Levels basically acts as a lookup for coor array. Dimensions of levels: levels[NL][LS] levels[i] denotes (i+1)th level and each element in levels[i][...] denotes the number of the point in the coor array. For instance, levels[1][2] = 5 denotes that the 3rd point in 2nd level corresponds to point 5 in coor array, i.e. coor[5][...] */ h_coo[i].a3 = i/LS; levels[(i/LS)*NL + i%LS] = i; } cudaMemcpy(d_coo, h_coo, num_points*sizeof(coor), cudaMemcpyHostToDevice); double *d_best_x, *d_best_y, *d_best_fx; int* d_LS, *d_i, *d_NL; cudaMalloc((void **)&d_best_x, sizeof(double)); cudaMalloc((void **)&d_best_y, sizeof(double)); cudaMalloc((void **)&d_best_fx, sizeof(double)); cudaMalloc((void **)&d_i, sizeof(int)); cudaMalloc((void **)&d_LS, sizeof(int)); cudaMalloc((void **)&d_NL, sizeof(int)); cudaMemcpy(d_best_x, &best_x, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_best_y, &best_y, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_best_fx, &best_fx, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_LS, &LS, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_NL, &NL, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_levels, levels, sizeof(int)*NL*LS, cudaMemcpyHostToDevice); for(int i=NL; i>=3; i--) { cudaMemcpy(d_i, &i, sizeof(int), cudaMemcpyHostToDevice); noicetoit<<<LS/512,512>>>(d_coo,d_levels, d_best_x, d_best_y, d_best_fx, d_i, d_LS, d_NL); fes+= LS; } // cudaMemcpy(h_coo, d_coo, num_points*sizeof(coor), cudaMemcpyDeviceToHost); for(int i=0; i<LS; i++) { int pt1 = 0 + (rand() % static_cast<int>(LS)); int pt2 = 0 + (rand() % static_cast<int>(LS)); int pt = levels[1*NL + i]; int temp1 = levels[0*NL + pt1]; int temp2 = levels[0*NL + pt2]; if(abs(eval(h_coo[temp2].a0, h_coo[temp2].a1) - eval(h_coo[temp1].a0, h_coo[temp1].a1)) < tol) { swap(temp1, temp2); } int r1 = ((double) rand() / (RAND_MAX)); int r2 = ((double) rand() / (RAND_MAX)); int r3 = ((double) rand() / (RAND_MAX)); // Update Functions h_coo[pt].a4 = r1*h_coo[pt].a4 + r2*(h_coo[temp1].a0 - h_coo[pt].a0) + phi*r3*(h_coo[temp2].a0 - h_coo[pt].a0); h_coo[pt].a5 = r1*h_coo[pt].a5 + r2*(h_coo[temp1].a1 - h_coo[pt].a1) + 
phi*r3*(h_coo[temp2].a1 - h_coo[pt].a1); h_coo[pt].a0 = h_coo[pt].a0 + h_coo[pt].a4; h_coo[pt].a1 = h_coo[pt].a1 + h_coo[pt].a5; double fx = eval(h_coo[pt].a0, h_coo[pt].a1); if(abs(fx - best_fx) < tol) { best_x = h_coo[pt].a0; best_y = h_coo[pt].a1; best_fx = fx; } h_coo[pt].a2 = fx; } fes+= LS; } cout << "Time: " << (double)(clock() - tStart)/CLOCKS_PER_SEC << endl; cout << "FINAL RESULTS: " << endl; cout << "Best x: " << best_x << endl; cout << "Best y: " << best_y << endl; // cout << "Best evaluation: " << best_fx << endl; return 0; }
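The pair above stores the stochastic update coefficients r1, r2 and r3 in int variables and derives level/point indices by applying (int)x % n to uniform doubles in (0,1], so those terms mostly collapse to zero. The standalone sketch below is not the author's kernel; it only illustrates, with made-up names (levelUpdate, randIndex), how per-thread uniform doubles from cuRAND can be kept as doubles and mapped to bounded integer indices.

#include <cstdio>
#include <curand_kernel.h>

// Hypothetical helper: map a uniform double in (0,1] to an index in [0, n).
__device__ int randIndex(double u, int n) {
    int idx = (int)(u * n);
    return idx < n ? idx : n - 1;              // clamp the u == 1.0 edge case
}

// Minimal sketch: every thread draws its own coefficients as doubles.
__global__ void levelUpdate(unsigned long long seed, int nLevels, int nPerLevel,
                            double *r1, double *r2, double *r3, int *lev, int *pt) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    curandState state;
    curand_init(seed, tid, 0, &state);
    r1[tid] = curand_uniform_double(&state);   // stays a double, not truncated to int
    r2[tid] = curand_uniform_double(&state);
    r3[tid] = curand_uniform_double(&state);
    lev[tid] = randIndex(curand_uniform_double(&state), nLevels);
    pt[tid]  = randIndex(curand_uniform_double(&state), nPerLevel);
}

int main() {
    const int n = 64;
    double *r1, *r2, *r3; int *lev, *pt;
    cudaMalloc((void**)&r1, n * sizeof(double));
    cudaMalloc((void**)&r2, n * sizeof(double));
    cudaMalloc((void**)&r3, n * sizeof(double));
    cudaMalloc((void**)&lev, n * sizeof(int));
    cudaMalloc((void**)&pt,  n * sizeof(int));
    levelUpdate<<<1, n>>>(1234ULL, 10, 4, r1, r2, r3, lev, pt);
    cudaDeviceSynchronize();
    printf("coefficients and indices generated\n");
    cudaFree(r1); cudaFree(r2); cudaFree(r3); cudaFree(lev); cudaFree(pt);
    return 0;
}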
dd2d4487b70344a503b93389eb34638e21fcd0a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" ////////////////////////////////////////////////////////////////////////////////// //DisperGPU // //Copyright (C) 2016 Bosserelle // // // //This program is free software: you can redistribute it and/or modify // //it under the terms of the GNU General Public License as published by // //the Free Software Foundation. // // // //This program is distributed in the hope that it will be useful, // //but WITHOUT ANY WARRANTY; without even the implied warranty of // //MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // //GNU General Public License for more details. // // // //You should have received a copy of the GNU General Public License // //along with this program. If not, see <http://www.gnu.org/licenses/>. // ////////////////////////////////////////////////////////////////////////////////// // declare texture reference for 2D float texture texture<float, 2, hipReadModeElementType> texU; texture<float, 2, hipReadModeElementType> texV; texture<float, 2, hipReadModeElementType> texlonu; texture<float, 2, hipReadModeElementType> texlatu; texture<float, 2, hipReadModeElementType> texdXU; texture<float, 2, hipReadModeElementType> texdYV; __global__ void HD_interp(int nx, int ny, int backswitch, int nhdstp, float totaltime, float hddt, float * Uold, float * Unew, float * UU) { unsigned int ix = blockIdx.x*blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y*blockDim.y + threadIdx.y; unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y; __shared__ float Uxo[16][16]; __shared__ float Uxn[16][16]; // __shared__ float Ums[16]; float fac = 1.0; /*Ums[tx]=Umask[ix];*/ if (backswitch>0) { fac = -1.0f; } if (ix<nx && iy<ny) { Uxo[tx][ty] = fac*Uold[ix + nx*iy]/**Ums[tx]*/; Uxn[tx][ty] = fac*Unew[ix + nx*iy]/**Ums[tx]*/; UU[ix + nx*iy] = Uxo[tx][ty] + (totaltime - hddt*nhdstp)*(Uxn[tx][ty] - Uxo[tx][ty]) / hddt; } } __global__ void NextHDstep(int nx, int ny, float * Uold, float * Unew) { //int ix = blockIdx.x * blockDim.x * blockDim.y + blockDim.x * threadIdx.y + threadIdx.x; unsigned int ix = blockIdx.x*blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y*blockDim.y + threadIdx.y; if (ix<nx && iy<ny) { Uold[ix + iy*nx] = Unew[ix + iy*nx]; } } __global__ void ResetNincel(int nx, int ny, float *Nincel) { //int ix = blockIdx.x * blockDim.x * blockDim.y + blockDim.x * threadIdx.y + threadIdx.x; unsigned int ix = blockIdx.x*blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y*blockDim.y + threadIdx.y; if (ix<nx && iy<ny) { Nincel[ix + iy*nx] = 0.0f; } } __global__ void updatepartpos(int npart, float dt, float Eh, float * dd_rand, float4 * partpos) { int i = blockIdx.x * blockDim.x * blockDim.y + blockDim.x * threadIdx.y + threadIdx.x; float Ux = 0.0f; float Vx = 0.0f; float Xd = 0.0f; //Diffusion term float Yd = 0.0f; float distu, distv; float xxx, yyy, zzz, ttt; xxx = partpos[i].x; //should be in i,j yyy = partpos[i].y; zzz = partpos[i].z; ttt = partpos[i].w; if (ttt >= 0.0f) { //Interpolate wter depth, Uvel Vvel at the particle position Ux = tex2D(texU, xxx, yyy); Vx = tex2D(texV, xxx + 0.5, yyy - 0.5);// U and V don't have the same coordinates but in the number of nodes it is just off by half a grid node in both dimension distu = tex2D(texdXU, xxx, yyy); distv = tex2D(texdYV, xxx + 0.5, yyy - 0.5); if (distu>0.0001 && distv>0.0001)//Avoid the div by zero which makes i and j #INF { // old formulation //Xd=(dd_rand[i]*2-1)*sqrtf(6*Eh*dt); //Yd=(dd_rand[npart-i]*2-1)*sqrtf(6*Eh*dt); //formulation 
used in Viikmae et al. Xd = sqrtf(-4.0f * Eh*dt*logf(1 - dd_rand[i]))*cosf(2 * pi*dd_rand[npart - i]); Yd = sqrtf(-4.0f * Eh*dt*logf(1 - dd_rand[i]))*sinf(2 * pi*dd_rand[npart - i]); xxx = xxx + (Ux*dt + Xd) / distu; // Need to add the runge kutta scheme here or not yyy = yyy + (Vx*dt + Yd) / distv; zzz = Ux; } } ttt = ttt + dt; partpos[i] = make_float4(xxx, yyy, zzz, ttt); } __global__ void ij2lonlat(int npart, float * xx, float *yy, float *xp, float *yp) { int i = blockIdx.x * blockDim.x * blockDim.y + blockDim.x * threadIdx.y + threadIdx.x; float lon; float lat; float xxx, yyy; xxx = xx[i]; yyy = yy[i]; lon = tex2D(texlonu, xxx, yyy); lat = tex2D(texlatu, xxx, yyy); xp[i] = lon; yp[i] = lat; // } __global__ void CalcNincel(int npart, int nx, int ny, float4* partpos, float *Nincel, float *cNincel, float *cTincel) { int i = blockIdx.x * blockDim.x * blockDim.y + blockDim.x * threadIdx.y + threadIdx.x; float xxx, yyy, ttt; int ix, iy; xxx = partpos[i].x; yyy = partpos[i].y; ttt = partpos[i].w; if (ttt >= 0) { if (xxx>0 && xxx<(nx - 1) && yyy>0 && yyy<ny - 1) { ix = floor(xxx); iy = floor(yyy); Nincel[ix + iy*nx] = Nincel[ix + iy*nx] + 1; cNincel[ix + iy*nx] = cNincel[ix + iy*nx] + 1; cTincel[ix + iy*nx] = cTincel[ix + iy*nx] + ttt; } } }
dd2d4487b70344a503b93389eb34638e21fcd0a6.cu
////////////////////////////////////////////////////////////////////////////////// //DisperGPU // //Copyright (C) 2016 Bosserelle // // // //This program is free software: you can redistribute it and/or modify // //it under the terms of the GNU General Public License as published by // //the Free Software Foundation. // // // //This program is distributed in the hope that it will be useful, // //but WITHOUT ANY WARRANTY; without even the implied warranty of // //MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // //GNU General Public License for more details. // // // //You should have received a copy of the GNU General Public License // //along with this program. If not, see <http://www.gnu.org/licenses/>. // ////////////////////////////////////////////////////////////////////////////////// // declare texture reference for 2D float texture texture<float, 2, cudaReadModeElementType> texU; texture<float, 2, cudaReadModeElementType> texV; texture<float, 2, cudaReadModeElementType> texlonu; texture<float, 2, cudaReadModeElementType> texlatu; texture<float, 2, cudaReadModeElementType> texdXU; texture<float, 2, cudaReadModeElementType> texdYV; __global__ void HD_interp(int nx, int ny, int backswitch, int nhdstp, float totaltime, float hddt, float * Uold, float * Unew, float * UU) { unsigned int ix = blockIdx.x*blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y*blockDim.y + threadIdx.y; unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y; __shared__ float Uxo[16][16]; __shared__ float Uxn[16][16]; // __shared__ float Ums[16]; float fac = 1.0; /*Ums[tx]=Umask[ix];*/ if (backswitch>0) { fac = -1.0f; } if (ix<nx && iy<ny) { Uxo[tx][ty] = fac*Uold[ix + nx*iy]/**Ums[tx]*/; Uxn[tx][ty] = fac*Unew[ix + nx*iy]/**Ums[tx]*/; UU[ix + nx*iy] = Uxo[tx][ty] + (totaltime - hddt*nhdstp)*(Uxn[tx][ty] - Uxo[tx][ty]) / hddt; } } __global__ void NextHDstep(int nx, int ny, float * Uold, float * Unew) { //int ix = blockIdx.x * blockDim.x * blockDim.y + blockDim.x * threadIdx.y + threadIdx.x; unsigned int ix = blockIdx.x*blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y*blockDim.y + threadIdx.y; if (ix<nx && iy<ny) { Uold[ix + iy*nx] = Unew[ix + iy*nx]; } } __global__ void ResetNincel(int nx, int ny, float *Nincel) { //int ix = blockIdx.x * blockDim.x * blockDim.y + blockDim.x * threadIdx.y + threadIdx.x; unsigned int ix = blockIdx.x*blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y*blockDim.y + threadIdx.y; if (ix<nx && iy<ny) { Nincel[ix + iy*nx] = 0.0f; } } __global__ void updatepartpos(int npart, float dt, float Eh, float * dd_rand, float4 * partpos) { int i = blockIdx.x * blockDim.x * blockDim.y + blockDim.x * threadIdx.y + threadIdx.x; float Ux = 0.0f; float Vx = 0.0f; float Xd = 0.0f; //Diffusion term float Yd = 0.0f; float distu, distv; float xxx, yyy, zzz, ttt; xxx = partpos[i].x; //should be in i,j yyy = partpos[i].y; zzz = partpos[i].z; ttt = partpos[i].w; if (ttt >= 0.0f) { //Interpolate wter depth, Uvel Vvel at the particle position Ux = tex2D(texU, xxx, yyy); Vx = tex2D(texV, xxx + 0.5, yyy - 0.5);// U and V don't have the same coordinates but in the number of nodes it is just off by half a grid node in both dimension distu = tex2D(texdXU, xxx, yyy); distv = tex2D(texdYV, xxx + 0.5, yyy - 0.5); if (distu>0.0001 && distv>0.0001)//Avoid the div by zero which makes i and j #INF { // old formulation //Xd=(dd_rand[i]*2-1)*sqrtf(6*Eh*dt); //Yd=(dd_rand[npart-i]*2-1)*sqrtf(6*Eh*dt); //formulation used in Viikmae et al. 
Xd = sqrtf(-4.0f * Eh*dt*logf(1 - dd_rand[i]))*cosf(2 * pi*dd_rand[npart - i]); Yd = sqrtf(-4.0f * Eh*dt*logf(1 - dd_rand[i]))*sinf(2 * pi*dd_rand[npart - i]); xxx = xxx + (Ux*dt + Xd) / distu; // Need to add the runge kutta scheme here or not yyy = yyy + (Vx*dt + Yd) / distv; zzz = Ux; } } ttt = ttt + dt; partpos[i] = make_float4(xxx, yyy, zzz, ttt); } __global__ void ij2lonlat(int npart, float * xx, float *yy, float *xp, float *yp) { int i = blockIdx.x * blockDim.x * blockDim.y + blockDim.x * threadIdx.y + threadIdx.x; float lon; float lat; float xxx, yyy; xxx = xx[i]; yyy = yy[i]; lon = tex2D(texlonu, xxx, yyy); lat = tex2D(texlatu, xxx, yyy); xp[i] = lon; yp[i] = lat; // } __global__ void CalcNincel(int npart, int nx, int ny, float4* partpos, float *Nincel, float *cNincel, float *cTincel) { int i = blockIdx.x * blockDim.x * blockDim.y + blockDim.x * threadIdx.y + threadIdx.x; float xxx, yyy, ttt; int ix, iy; xxx = partpos[i].x; yyy = partpos[i].y; ttt = partpos[i].w; if (ttt >= 0) { if (xxx>0 && xxx<(nx - 1) && yyy>0 && yyy<ny - 1) { ix = floor(xxx); iy = floor(yyy); Nincel[ix + iy*nx] = Nincel[ix + iy*nx] + 1; cNincel[ix + iy*nx] = cNincel[ix + iy*nx] + 1; cTincel[ix + iy*nx] = cTincel[ix + iy*nx] + ttt; } } }
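The comment in updatepartpos points at the Viikmae et al. formulation: a radial step sqrt(-4*Eh*dt*log(1-u)) and an angle 2*pi*v built from two uniform draws. The host-only sketch below is not DisperGPU code (Eh, dt and the RNG are arbitrary here, and pi is defined locally); it just exercises that formula and checks that the mean squared displacement lands near the expected 4*Eh*dt.

#include <cmath>
#include <cstdio>
#include <random>

int main() {
    const double Eh = 1.0, dt = 10.0, pi = 3.14159265358979323846;
    std::mt19937 gen(42);
    std::uniform_real_distribution<double> uni(0.0, 1.0);
    const int N = 100000;
    double meanSq = 0.0;
    for (int i = 0; i < N; ++i) {
        double u = uni(gen), v = uni(gen);
        double r  = std::sqrt(-4.0 * Eh * dt * std::log(1.0 - u)); // radial step
        double Xd = r * std::cos(2.0 * pi * v);                    // x displacement
        double Yd = r * std::sin(2.0 * pi * v);                    // y displacement
        meanSq += Xd * Xd + Yd * Yd;
    }
    // For this formulation E[Xd^2 + Yd^2] = 4*Eh*dt, i.e. 40 with the values above.
    printf("mean squared displacement: %f (expected %f)\n", meanSq / N, 4.0 * Eh * dt);
    return 0;
}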
acb1ea14e6dd55ccaa4ac0fcb6bfb61843c11e4d.hip
// !!! This is a file automatically generated by hipify!!! #include "lab3_io.h" #include "lab3_cuda.h" #include <stdlib.h> #include <omp.h> /* Arguments: arg1: input filename (consist M, N and D) arg2: retention (percentage of information to be retained by PCA) */ int main(int argc, char const *argv[]) { if (argc < 3){ printf("\nLess Arguments\n"); return 0; } if (argc > 3){ printf("\nTOO many Arguments\n"); return 0; } //--------------------------------------------------------------------- int M; //no of rows (samples) in input matrix D (input) int N; //no of columns (features) in input matrix D (input) double* D; //1D array of M x N matrix to be reduced (input) double* U; //1D array of N x N (or M x M) matrix U (to be computed by SVD) double* SIGMA; //1D array of N x M (or M x N) diagonal matrix SIGMA (to be computed by SVD) double* V_T; //1D array of M x M (or N x N) matrix V_T (to be computed by SVD) int SIGMAm; // #rows in SIGMA, read note in lab3_cuda.h (to be computed by SVD) int SIGMAn; // #columns in SIGMA, read note in lab3_cuda.h (to be computed by SVD) int K; //no of coulmns (features) in reduced matrix D_HAT (to be computed by PCA) double *D_HAT; //1D array of M x K reduced matrix (to be computed by PCA) int retention; //percentage of information to be retained by PCA (command line input) //--------------------------------------------------------------------- retention = atoi(argv[2]); //retention = 90 means 90% of information should be retained float computation_time; /* -- Pre-defined function -- reads matrix and its dimentions from input file and creats array D #elements in D is M * N format - -------------------------------------------------------------------------------------- | D[0][0] | D[0][1] | ... | D[0][N-1] | D[1][0] | ... | D[1][N-1] | ... | D[M-1][N-1] | -------------------------------------------------------------------------------------- */ read_matrix (argv[1], &M, &N, &D); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // /* // ***************************************************** // TODO -- You must implement this function // ***************************************************** // */ SVD_and_PCA(M, N, D, &U, &SIGMA, &V_T, &SIGMAm, &SIGMAn, &D_HAT, &K, retention); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&computation_time, start, stop); /* --Pre-defined functions -- checks for correctness of results computed by SVD and PCA and outputs the results */ write_result(M, N, D, U, SIGMA, V_T, SIGMAm, SIGMAn, K, D_HAT, computation_time); return 0; }
acb1ea14e6dd55ccaa4ac0fcb6bfb61843c11e4d.cu
#include "lab3_io.h" #include "lab3_cuda.h" #include <stdlib.h> #include <omp.h> /* Arguments: arg1: input filename (consist M, N and D) arg2: retention (percentage of information to be retained by PCA) */ int main(int argc, char const *argv[]) { if (argc < 3){ printf("\nLess Arguments\n"); return 0; } if (argc > 3){ printf("\nTOO many Arguments\n"); return 0; } //--------------------------------------------------------------------- int M; //no of rows (samples) in input matrix D (input) int N; //no of columns (features) in input matrix D (input) double* D; //1D array of M x N matrix to be reduced (input) double* U; //1D array of N x N (or M x M) matrix U (to be computed by SVD) double* SIGMA; //1D array of N x M (or M x N) diagonal matrix SIGMA (to be computed by SVD) double* V_T; //1D array of M x M (or N x N) matrix V_T (to be computed by SVD) int SIGMAm; // #rows in SIGMA, read note in lab3_cuda.h (to be computed by SVD) int SIGMAn; // #columns in SIGMA, read note in lab3_cuda.h (to be computed by SVD) int K; //no of coulmns (features) in reduced matrix D_HAT (to be computed by PCA) double *D_HAT; //1D array of M x K reduced matrix (to be computed by PCA) int retention; //percentage of information to be retained by PCA (command line input) //--------------------------------------------------------------------- retention = atoi(argv[2]); //retention = 90 means 90% of information should be retained float computation_time; /* -- Pre-defined function -- reads matrix and its dimentions from input file and creats array D #elements in D is M * N format - -------------------------------------------------------------------------------------- | D[0][0] | D[0][1] | ... | D[0][N-1] | D[1][0] | ... | D[1][N-1] | ... | D[M-1][N-1] | -------------------------------------------------------------------------------------- */ read_matrix (argv[1], &M, &N, &D); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // /* // ***************************************************** // TODO -- You must implement this function // ***************************************************** // */ SVD_and_PCA(M, N, D, &U, &SIGMA, &V_T, &SIGMAm, &SIGMAn, &D_HAT, &K, retention); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&computation_time, start, stop); /* --Pre-defined functions -- checks for correctness of results computed by SVD and PCA and outputs the results */ write_result(M, N, D, U, SIGMA, V_T, SIGMAm, SIGMAn, K, D_HAT, computation_time); return 0; }
318d9941aaa036f6ba47913be82f96d0db40d479.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "job2a.h" int main(){ int **h_A = (int**)malloc(size*sizeof(int*)); int **h_R = (int**)malloc(size*sizeof(int*)); int a,b; hipError_t err; for(a=0;a<size;a++)h_A[a]=(int*)malloc(size*sizeof(int)); for(a=0;a<size;a++)h_R[a]=(int*)malloc(size*sizeof(int)); for(a=0;a<size;a++){ for(b=0;b<size;b++){ h_A[a][b]=rand(); } } int * d_A; err = hipMalloc((void**)&d_A,size*size*sizeof(int)); debug(err,"Unable to allocate matrix on device") //copy for(a=0;a<size;a++){ err = hipMemcpy(d_A+a*size,h_A[a],size*sizeof(int),hipMemcpyHostToDevice); debug(err,"Failed to copy to device") } printf("Orignal Matrix:\n"); fflush(stdout); for(a=0;a<size;a++){ for(b=0;b<size;b++){ printf("%d ",h_A[a][b]); } printf("\n"); } fflush(stdout); dim3 blockdim(8,8,1); int gridx = (int)ceil(8.0/size); dim3 griddim(gridx,gridx,1); printf("Swapping..."); hipLaunchKernelGGL(( swap), dim3(griddim),dim3(blockdim), 0, 0, d_A,size); printf("Done.\n"); fflush(stdout); err = hipGetLastError(); debug(err,"Last error in execution") for(a=0;a<size;a++){ err = hipMemcpy(h_R[a],d_A+a*size,size*sizeof(int),hipMemcpyDeviceToHost); debug(err,"Failed to copy back to host") } printf("Swapped array:\n"); for(a=0;a<size;a++){ for(b=0;b<size;b++){ printf("%d ",h_R[a][b]); } printf("\n"); } fflush(stdout); for(a=0;a<size;a++){ free(h_A[a]); free(h_R[a]); } free(h_A); free(h_R); err = hipFree(d_A); debug(err,"Failed to free memory on device") err = hipDeviceReset(); debug(err,"Unable to reset device") printf("Last err: %d\n",err); return 0; }
318d9941aaa036f6ba47913be82f96d0db40d479.cu
#include "job2a.h" int main(){ int **h_A = (int**)malloc(size*sizeof(int*)); int **h_R = (int**)malloc(size*sizeof(int*)); int a,b; cudaError_t err; for(a=0;a<size;a++)h_A[a]=(int*)malloc(size*sizeof(int)); for(a=0;a<size;a++)h_R[a]=(int*)malloc(size*sizeof(int)); for(a=0;a<size;a++){ for(b=0;b<size;b++){ h_A[a][b]=rand(); } } int * d_A; err = cudaMalloc((void**)&d_A,size*size*sizeof(int)); debug(err,"Unable to allocate matrix on device") //copy for(a=0;a<size;a++){ err = cudaMemcpy(d_A+a*size,h_A[a],size*sizeof(int),cudaMemcpyHostToDevice); debug(err,"Failed to copy to device") } printf("Orignal Matrix:\n"); fflush(stdout); for(a=0;a<size;a++){ for(b=0;b<size;b++){ printf("%d ",h_A[a][b]); } printf("\n"); } fflush(stdout); dim3 blockdim(8,8,1); int gridx = (int)ceil(8.0/size); dim3 griddim(gridx,gridx,1); printf("Swapping..."); swap<<<griddim,blockdim>>>(d_A,size); printf("Done.\n"); fflush(stdout); err = cudaGetLastError(); debug(err,"Last error in execution") for(a=0;a<size;a++){ err = cudaMemcpy(h_R[a],d_A+a*size,size*sizeof(int),cudaMemcpyDeviceToHost); debug(err,"Failed to copy back to host") } printf("Swapped array:\n"); for(a=0;a<size;a++){ for(b=0;b<size;b++){ printf("%d ",h_R[a][b]); } printf("\n"); } fflush(stdout); for(a=0;a<size;a++){ free(h_A[a]); free(h_R[a]); } free(h_A); free(h_R); err = cudaFree(d_A); debug(err,"Failed to free memory on device") err = cudaDeviceReset(); debug(err,"Unable to reset device") printf("Last err: %d\n",err); return 0; }
906919e03567f1fbee14a958390126c8291f39b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "src/extendrt/delegate/tensorrt/cuda_impl/equal.cuh" #include <stdio.h> #include "src/extendrt/delegate/tensorrt/cuda_impl/cuda_helper.h" template <typename T> __global__ void EqualKernel(const T *input1, const T *input2, T *output, int element_cnt) { for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < element_cnt; pos += blockDim.x * gridDim.x) { output[pos] = (input1[pos] - input2[pos] < 1e-6 && input1[pos] - input2[pos] > -1e-6); } } template <typename T> void Equal(const T *input1, const T *input2, T *output, int element_cnt, hipStream_t stream) { hipLaunchKernelGGL(( EqualKernel), dim3(GET_BLOCKS(element_cnt)), dim3(GET_THREADS), 0, stream, input1, input2, output, element_cnt); return; } template void Equal(const float *input1, const float *input2, float *output, int element_cnt, hipStream_t stream); template void Equal(const int *input1, const int *input2, int *output, int element_cnt, hipStream_t stream);
906919e03567f1fbee14a958390126c8291f39b3.cu
/** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "src/extendrt/delegate/tensorrt/cuda_impl/equal.cuh" #include <stdio.h> #include "src/extendrt/delegate/tensorrt/cuda_impl/cuda_helper.h" template <typename T> __global__ void EqualKernel(const T *input1, const T *input2, T *output, int element_cnt) { for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < element_cnt; pos += blockDim.x * gridDim.x) { output[pos] = (input1[pos] - input2[pos] < 1e-6 && input1[pos] - input2[pos] > -1e-6); } } template <typename T> void Equal(const T *input1, const T *input2, T *output, int element_cnt, cudaStream_t stream) { EqualKernel<<<GET_BLOCKS(element_cnt), GET_THREADS, 0, stream>>>(input1, input2, output, element_cnt); return; } template void Equal(const float *input1, const float *input2, float *output, int element_cnt, cudaStream_t stream); template void Equal(const int *input1, const int *input2, int *output, int element_cnt, cudaStream_t stream);
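GET_BLOCKS and GET_THREADS in the Equal kernel come from cuda_helper.h and are not shown in this dump. The sketch below is not the MindSpore implementation; it spells out the same grid-stride elementwise pattern and the 1e-6 tolerance compare with the block math written explicitly.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void equalKernel(const float *a, const float *b, float *out, int n) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {                     // grid-stride loop
        float d = a[i] - b[i];
        out[i] = (d < 1e-6f && d > -1e-6f) ? 1.0f : 0.0f;   // tolerance-based equality
    }
}

int main() {
    const int n = 1 << 20, threads = 256;
    const int blocks = (n + threads - 1) / threads;
    float *a, *b, *out;
    cudaMallocManaged((void**)&a, n * sizeof(float));
    cudaMallocManaged((void**)&b, n * sizeof(float));
    cudaMallocManaged((void**)&out, n * sizeof(float));
    for (int i = 0; i < n; ++i) { a[i] = (float)i; b[i] = (i % 2) ? (float)i : i + 1.0f; }
    equalKernel<<<blocks, threads>>>(a, b, out, n);
    cudaDeviceSynchronize();
    printf("out[0]=%.0f out[1]=%.0f\n", out[0], out[1]);    // expect 0 and 1
    cudaFree(a); cudaFree(b); cudaFree(out);
    return 0;
}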
2579c822fcfb9b926cbb906f097d4f8b03d15fc6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include <iostream> #include <stdio.h> #include <vector> #include <cmath> #include <time.h> #include <algorithm> #include <fstream> #include <string> #include "host_function.cuh" #include "matrix_operations.cuh" #define OptNofThreads 128 #define OptNofBlocks 128 #define OptNofBlocksX 32 #define OptNofBlocksY 32 template <typename T> __host__ void train_executer(T* d_images, T* d_labels, T* d_posibilities, T* d_errors, T* d_third_layer_sums, T* d_third_layer_output, T* d_second_layer_output, T* d_first_layer_output, T* d_sigmoid_multiplier, T* d_error_multiplier, T* d_first_layer_weights, T* d_second_layer_weights, T* d_third_layer_weights, T* d_third_layer_correction, T* d_second_layer_correction, T* d_first_layer_correction, T* d_square_error, int n_of_images, int n_of_models, int n_of_third_layer_neurons, int n_of_second_layer_neurons, int n_of_neurons, float alpha, float* square_error, float previous_error, float current_error, float** first_layer_versions, float** second_layer_versions, float** third_layer_versions, int n_of_versions, float dropout_rate, bool execute) { int n_of_v = 0; int n_for_dropout = dropout_rate * n_of_second_layer_neurons; //T* dropped_second_layer; //hipMalloc((void**)&dropped_second_layer, sizeof(T)*n_for_dropout*n_of_models); //unsigned int* second_layer_indices; //hipMalloc((void**)&second_layer_indices, sizeof(unsigned int)*n_for_dropout*n_of_models); //hiprandGenerator_t gen; //hiprandCreateGenerator(&gen, // HIPRAND_RNG_PSEUDO_DEFAULT); //hiprandSetPseudoRandomGeneratorSeed(gen, // 1234ULL); //while (previous_error > current_error) for (int i = 0; i < 500; ++i) { //hiprandGenerate(gen, second_layer_indices, n_for_dropout*n_of_models); //GENERATING OUTPUT //***************** get_first_layer_output << < OptNofBlocks, OptNofThreads >> > (d_images, d_first_layer_weights, d_first_layer_output, d_sigmoid_multiplier, n_of_images, n_of_second_layer_neurons, n_of_neurons); { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; } else { std::cout << "get_first_layer_output kernel launch without error" << std::endl; } } hipDeviceSynchronize(); get_second_layer_output << < OptNofBlocks, OptNofThreads >> > (d_first_layer_output, d_second_layer_output, d_second_layer_weights, d_error_multiplier, n_of_images, n_of_second_layer_neurons, n_of_third_layer_neurons); { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; } else { std::cout << "get_second_layer_output kernel launch without error" << std::endl; } } hipDeviceSynchronize(); get_third_layer_output << < n_of_models, OptNofThreads >> > (d_third_layer_weights, d_second_layer_output, d_third_layer_output, d_third_layer_sums, n_of_images, n_of_third_layer_neurons); { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; } else { std::cout << "get_second_layer_output kernel launch without error" << std::endl; } } hipDeviceSynchronize(); get_posibilities << <OptNofBlocks, n_of_models >> > (d_third_layer_output, d_third_layer_sums, d_posibilities, n_of_images, n_of_models); { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; } else { std::cout << "get_posibilities kernel launch without error" << std::endl; } } 
hipDeviceSynchronize(); //********************* //END GENERATING OUTPUT if (execute) { break; } else { //THIRD_LAYER_WEIGHTS UPDATE //*************************** previous_error = current_error; get_errors << < n_of_models, OptNofThreads >> > (d_posibilities, d_labels, d_errors, d_square_error, n_of_images, n_of_models); { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; } else { std::cout << "get_errors kernel launch without error" << std::endl; } } hipDeviceSynchronize(); Error_check(hipMemcpy(square_error, d_square_error, sizeof(float)*n_of_models, hipMemcpyDeviceToHost), " copy square_error back to host"); current_error = square_error[0]; std::wcout << " \n\n\n " << current_error << std::endl; get_third_layer_correction << <n_of_models, OptNofThreads >> > (d_errors, d_second_layer_output, d_third_layer_correction, d_third_layer_weights, d_error_multiplier, n_of_images, n_of_third_layer_neurons, n_of_models); { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; } else { std::cout << "get_third_layercorrection kernel launch without error" << std::endl; } } hipDeviceSynchronize(); third_layer_weights_update << <n_of_models, OptNofThreads >> > (d_third_layer_weights, d_third_layer_correction, n_of_images, n_of_third_layer_neurons, 0.05* alpha); { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; } else { std::cout << "third_layer_update kernel launch without error" << std::endl; } } hipDeviceSynchronize(); //******************************* //END THIRD_LAYER_WEIGHTS UPDATE //SECOND_LAYER_WEIGHTS UPDATE //************************** dim3 GridDim2(OptNofBlocksX, OptNofBlocksY); get_second_layer_correction << < GridDim2, OptNofThreads >> > (d_errors, d_first_layer_output, d_second_layer_output, d_second_layer_weights, d_third_layer_weights, d_second_layer_correction, d_error_multiplier, d_sigmoid_multiplier, n_of_images, n_of_second_layer_neurons, n_of_third_layer_neurons, n_of_models); { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; } else { std::cout << "get_second_layer_correction kernel launch without error" << std::endl; } } hipDeviceSynchronize(); second_layer_weights_update << < n_of_third_layer_neurons, n_of_second_layer_neurons + 1 >> > (d_second_layer_weights, d_second_layer_correction, alpha * 0.2, n_of_second_layer_neurons); { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; } else { std::cout << "second_layer_weights_update kernel launch without error" << std::endl; } } hipDeviceSynchronize(); //********************************** //END OF SECOND_LAYER_WEIGHTS UPDATE //FIRST_LAYER_WEIGHTS UPDATE //************************** dim3 GridDim1(OptNofBlocksX, OptNofBlocksY); get_first_layer_correction << < GridDim1, OptNofThreads >> > (d_errors, d_images, d_first_layer_output, d_second_layer_output, d_third_layer_weights, d_second_layer_weights, d_first_layer_correction, d_sigmoid_multiplier, n_of_images, n_of_neurons, n_of_second_layer_neurons, n_of_third_layer_neurons, n_of_models); { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; } else { std::cout << "get_first_layer_correction kernel launch without error" << std::endl; } } hipDeviceSynchronize(); first_layer_weights_update << <OptNofBlocks, OptNofThreads >> > 
(d_first_layer_weights, d_first_layer_correction, alpha, n_of_neurons, n_of_second_layer_neurons); { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; } else { std::cout << "first_layer_weights_update kernel launch without error" << std::endl; } } hipDeviceSynchronize(); //****************************** //END FIRST_LAYER_WEIGHTS UPDATE if ((i + 1) % (500 / n_of_versions) == 0) { hipMemcpy(first_layer_versions[n_of_v], d_first_layer_weights, sizeof(float)*n_of_second_layer_neurons*n_of_neurons, hipMemcpyDeviceToHost); hipMemcpy(second_layer_versions[n_of_v], d_second_layer_weights, sizeof(float)*n_of_third_layer_neurons*(n_of_second_layer_neurons + 1), hipMemcpyDeviceToHost); hipMemcpy(third_layer_versions[n_of_v], d_third_layer_weights, sizeof(float)*n_of_models*(n_of_third_layer_neurons + 1), hipMemcpyDeviceToHost); n_of_v++; } } } } template <typename T> __host__ void executer(T* d_images, T* d_first_layer_weights, T* d_second_layer_weights, T* d_third_layer_weights, T* d_posibilities, int n_of_images, int n_of_neurons, int n_of_second_layer_neurons, int n_of_third_layer_neurons, int n_of_models) { float* d_first_layer_output, *d_second_layer_output, *d_third_layer_output, *d_third_layer_sums, *d_error_multiplier, *d_sigmoid_multiplier; hipMalloc((void**)&d_first_layer_output, sizeof(float)*(n_of_second_layer_neurons + 1)*n_of_images); hipMalloc((void**)&d_second_layer_output, sizeof(float)*(n_of_third_layer_neurons + 1)*n_of_models); hipMalloc((void**)&d_third_layer_output, sizeof(float)*n_of_images*n_of_models); hipMalloc((void**)&d_third_layer_sums, sizeof(float)*n_of_images); Error_check(hipMalloc((void**)&d_sigmoid_multiplier, sizeof(float)*n_of_images*n_of_second_layer_neurons), "labels allocating on device"); Error_check(hipMalloc((void**)&d_error_multiplier, sizeof(float)*n_of_images*n_of_third_layer_neurons), "labels allocating on device"); //GENERATING OUTPUT //***************** get_first_layer_output << < OptNofBlocks, OptNofThreads >> > (d_images, d_first_layer_weights, d_first_layer_output, d_sigmoid_multiplier, n_of_images, n_of_second_layer_neurons, n_of_neurons); { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; } else { std::cout << "get_first_layer_output kernel launch without error" << std::endl; } } hipDeviceSynchronize(); get_second_layer_output << < OptNofBlocks, OptNofThreads >> > (d_first_layer_output, d_second_layer_output, d_second_layer_weights, d_error_multiplier, n_of_images, n_of_second_layer_neurons, n_of_third_layer_neurons); { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; } else { std::cout << "get_second_layer_output kernel launch without error" << std::endl; } } hipDeviceSynchronize(); get_third_layer_output << < n_of_models, OptNofThreads >> > (d_third_layer_weights, d_second_layer_output, d_third_layer_output, d_third_layer_sums, n_of_images, n_of_third_layer_neurons); { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; } else { std::cout << "get_second_layer_output kernel launch without error" << std::endl; } } hipDeviceSynchronize(); get_posibilities << <OptNofBlocks, n_of_models >> > (d_third_layer_output, d_third_layer_sums, d_posibilities, n_of_images, n_of_models); { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; } else { std::cout << 
"get_posibilities kernel launch without error" << std::endl; } } hipDeviceSynchronize(); //********************* //END GENERATING OUTPUT hipFree(d_first_layer_output); hipFree(d_third_layer_sums); }
2579c822fcfb9b926cbb906f097d4f8b03d15fc6.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda.h" #include "curand.h" #include <iostream> #include <stdio.h> #include <vector> #include <cmath> #include <time.h> #include <algorithm> #include <fstream> #include <string> #include "host_function.cuh" #include "matrix_operations.cuh" #define OptNofThreads 128 #define OptNofBlocks 128 #define OptNofBlocksX 32 #define OptNofBlocksY 32 template <typename T> __host__ void train_executer(T* d_images, T* d_labels, T* d_posibilities, T* d_errors, T* d_third_layer_sums, T* d_third_layer_output, T* d_second_layer_output, T* d_first_layer_output, T* d_sigmoid_multiplier, T* d_error_multiplier, T* d_first_layer_weights, T* d_second_layer_weights, T* d_third_layer_weights, T* d_third_layer_correction, T* d_second_layer_correction, T* d_first_layer_correction, T* d_square_error, int n_of_images, int n_of_models, int n_of_third_layer_neurons, int n_of_second_layer_neurons, int n_of_neurons, float alpha, float* square_error, float previous_error, float current_error, float** first_layer_versions, float** second_layer_versions, float** third_layer_versions, int n_of_versions, float dropout_rate, bool execute) { int n_of_v = 0; int n_for_dropout = dropout_rate * n_of_second_layer_neurons; //T* dropped_second_layer; //cudaMalloc((void**)&dropped_second_layer, sizeof(T)*n_for_dropout*n_of_models); //unsigned int* second_layer_indices; //cudaMalloc((void**)&second_layer_indices, sizeof(unsigned int)*n_for_dropout*n_of_models); //curandGenerator_t gen; //curandCreateGenerator(&gen, // CURAND_RNG_PSEUDO_DEFAULT); //curandSetPseudoRandomGeneratorSeed(gen, // 1234ULL); //while (previous_error > current_error) for (int i = 0; i < 500; ++i) { //curandGenerate(gen, second_layer_indices, n_for_dropout*n_of_models); //GENERATING OUTPUT //***************** get_first_layer_output << < OptNofBlocks, OptNofThreads >> > (d_images, d_first_layer_weights, d_first_layer_output, d_sigmoid_multiplier, n_of_images, n_of_second_layer_neurons, n_of_neurons); { cudaError err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; } else { std::cout << "get_first_layer_output kernel launch without error" << std::endl; } } cudaDeviceSynchronize(); get_second_layer_output << < OptNofBlocks, OptNofThreads >> > (d_first_layer_output, d_second_layer_output, d_second_layer_weights, d_error_multiplier, n_of_images, n_of_second_layer_neurons, n_of_third_layer_neurons); { cudaError err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; } else { std::cout << "get_second_layer_output kernel launch without error" << std::endl; } } cudaDeviceSynchronize(); get_third_layer_output << < n_of_models, OptNofThreads >> > (d_third_layer_weights, d_second_layer_output, d_third_layer_output, d_third_layer_sums, n_of_images, n_of_third_layer_neurons); { cudaError err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; } else { std::cout << "get_second_layer_output kernel launch without error" << std::endl; } } cudaDeviceSynchronize(); get_posibilities << <OptNofBlocks, n_of_models >> > (d_third_layer_output, d_third_layer_sums, d_posibilities, n_of_images, n_of_models); { cudaError err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; } else { std::cout << "get_posibilities kernel launch without error" << std::endl; } } cudaDeviceSynchronize(); //********************* //END GENERATING OUTPUT 
if (execute) { break; } else { //THIRD_LAYER_WEIGHTS UPDATE //*************************** previous_error = current_error; get_errors << < n_of_models, OptNofThreads >> > (d_posibilities, d_labels, d_errors, d_square_error, n_of_images, n_of_models); { cudaError err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; } else { std::cout << "get_errors kernel launch without error" << std::endl; } } cudaDeviceSynchronize(); Error_check(cudaMemcpy(square_error, d_square_error, sizeof(float)*n_of_models, cudaMemcpyDeviceToHost), " copy square_error back to host"); current_error = square_error[0]; std::wcout << " \n\n\n " << current_error << std::endl; get_third_layer_correction << <n_of_models, OptNofThreads >> > (d_errors, d_second_layer_output, d_third_layer_correction, d_third_layer_weights, d_error_multiplier, n_of_images, n_of_third_layer_neurons, n_of_models); { cudaError err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; } else { std::cout << "get_third_layercorrection kernel launch without error" << std::endl; } } cudaDeviceSynchronize(); third_layer_weights_update << <n_of_models, OptNofThreads >> > (d_third_layer_weights, d_third_layer_correction, n_of_images, n_of_third_layer_neurons, 0.05* alpha); { cudaError err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; } else { std::cout << "third_layer_update kernel launch without error" << std::endl; } } cudaDeviceSynchronize(); //******************************* //END THIRD_LAYER_WEIGHTS UPDATE //SECOND_LAYER_WEIGHTS UPDATE //************************** dim3 GridDim2(OptNofBlocksX, OptNofBlocksY); get_second_layer_correction << < GridDim2, OptNofThreads >> > (d_errors, d_first_layer_output, d_second_layer_output, d_second_layer_weights, d_third_layer_weights, d_second_layer_correction, d_error_multiplier, d_sigmoid_multiplier, n_of_images, n_of_second_layer_neurons, n_of_third_layer_neurons, n_of_models); { cudaError err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; } else { std::cout << "get_second_layer_correction kernel launch without error" << std::endl; } } cudaDeviceSynchronize(); second_layer_weights_update << < n_of_third_layer_neurons, n_of_second_layer_neurons + 1 >> > (d_second_layer_weights, d_second_layer_correction, alpha * 0.2, n_of_second_layer_neurons); { cudaError err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; } else { std::cout << "second_layer_weights_update kernel launch without error" << std::endl; } } cudaDeviceSynchronize(); //********************************** //END OF SECOND_LAYER_WEIGHTS UPDATE //FIRST_LAYER_WEIGHTS UPDATE //************************** dim3 GridDim1(OptNofBlocksX, OptNofBlocksY); get_first_layer_correction << < GridDim1, OptNofThreads >> > (d_errors, d_images, d_first_layer_output, d_second_layer_output, d_third_layer_weights, d_second_layer_weights, d_first_layer_correction, d_sigmoid_multiplier, n_of_images, n_of_neurons, n_of_second_layer_neurons, n_of_third_layer_neurons, n_of_models); { cudaError err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; } else { std::cout << "get_first_layer_correction kernel launch without error" << std::endl; } } cudaDeviceSynchronize(); first_layer_weights_update << <OptNofBlocks, OptNofThreads >> > (d_first_layer_weights, d_first_layer_correction, alpha, 
n_of_neurons, n_of_second_layer_neurons); { cudaError err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; } else { std::cout << "first_layer_weights_update kernel launch without error" << std::endl; } } cudaDeviceSynchronize(); //****************************** //END FIRST_LAYER_WEIGHTS UPDATE if ((i + 1) % (500 / n_of_versions) == 0) { cudaMemcpy(first_layer_versions[n_of_v], d_first_layer_weights, sizeof(float)*n_of_second_layer_neurons*n_of_neurons, cudaMemcpyDeviceToHost); cudaMemcpy(second_layer_versions[n_of_v], d_second_layer_weights, sizeof(float)*n_of_third_layer_neurons*(n_of_second_layer_neurons + 1), cudaMemcpyDeviceToHost); cudaMemcpy(third_layer_versions[n_of_v], d_third_layer_weights, sizeof(float)*n_of_models*(n_of_third_layer_neurons + 1), cudaMemcpyDeviceToHost); n_of_v++; } } } } template <typename T> __host__ void executer(T* d_images, T* d_first_layer_weights, T* d_second_layer_weights, T* d_third_layer_weights, T* d_posibilities, int n_of_images, int n_of_neurons, int n_of_second_layer_neurons, int n_of_third_layer_neurons, int n_of_models) { float* d_first_layer_output, *d_second_layer_output, *d_third_layer_output, *d_third_layer_sums, *d_error_multiplier, *d_sigmoid_multiplier; cudaMalloc((void**)&d_first_layer_output, sizeof(float)*(n_of_second_layer_neurons + 1)*n_of_images); cudaMalloc((void**)&d_second_layer_output, sizeof(float)*(n_of_third_layer_neurons + 1)*n_of_models); cudaMalloc((void**)&d_third_layer_output, sizeof(float)*n_of_images*n_of_models); cudaMalloc((void**)&d_third_layer_sums, sizeof(float)*n_of_images); Error_check(cudaMalloc((void**)&d_sigmoid_multiplier, sizeof(float)*n_of_images*n_of_second_layer_neurons), "labels allocating on device"); Error_check(cudaMalloc((void**)&d_error_multiplier, sizeof(float)*n_of_images*n_of_third_layer_neurons), "labels allocating on device"); //GENERATING OUTPUT //***************** get_first_layer_output << < OptNofBlocks, OptNofThreads >> > (d_images, d_first_layer_weights, d_first_layer_output, d_sigmoid_multiplier, n_of_images, n_of_second_layer_neurons, n_of_neurons); { cudaError err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; } else { std::cout << "get_first_layer_output kernel launch without error" << std::endl; } } cudaDeviceSynchronize(); get_second_layer_output << < OptNofBlocks, OptNofThreads >> > (d_first_layer_output, d_second_layer_output, d_second_layer_weights, d_error_multiplier, n_of_images, n_of_second_layer_neurons, n_of_third_layer_neurons); { cudaError err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; } else { std::cout << "get_second_layer_output kernel launch without error" << std::endl; } } cudaDeviceSynchronize(); get_third_layer_output << < n_of_models, OptNofThreads >> > (d_third_layer_weights, d_second_layer_output, d_third_layer_output, d_third_layer_sums, n_of_images, n_of_third_layer_neurons); { cudaError err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; } else { std::cout << "get_second_layer_output kernel launch without error" << std::endl; } } cudaDeviceSynchronize(); get_posibilities << <OptNofBlocks, n_of_models >> > (d_third_layer_output, d_third_layer_sums, d_posibilities, n_of_images, n_of_models); { cudaError err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; } else { std::cout << "get_posibilities kernel launch 
without error" << std::endl; } } cudaDeviceSynchronize(); //********************* //END GENERATING OUTPUT cudaFree(d_first_layer_output); cudaFree(d_third_layer_sums); }
2479f9eae183b63f9022167c23d8bbbf54c8314e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if 1 /* * Copyright 1993-2010 NVIDIA Corporation.  All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ /* A Julia set is the boundary formed by the points that satisfy a particular complex-valued iteration. For each choice of the constant, the boundary takes an irregular, fractal shape, one of the most interesting and beautiful objects in mathematics. The algorithm is simple: evaluate every point of the complex plane through the iteration Z(n+1)=Z(n)*Z(n)+C. If the iteration diverges for a point, that point does not belong to the Julia set; if the iterates stay within a bound, the point belongs to the set. The GPU-oriented version of the algorithm is as follows: */ #include<windows.h> #include "../common/book.h" #include "../common/cpu_bitmap.h" #define DIM 500 struct hipComplex { float r; float i; hipComplex( float a, float b ) : r(a), i(b) {} float magnitude2( void ) { return r * r + i * i; }// member function: squared magnitude hipComplex operator*(const hipComplex& a) { // member function: overloaded complex multiplication return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } }; int julia( int x, int y ) // decides whether a pixel belongs to the set { const float scale = 1.5; //&scale zoom factor (smaller value = stronger zoom) float jx = scale * (float)(DIM/2 - x)/(DIM/2); // convert pixel coordinates to complex-plane coordinates float jy = scale * (float)(DIM/2 - y)/(DIM/2); hipComplex c(-0.8, 0.156); //& the constant C of the iteration hipComplex a(jx, jy); // initial value of Z int i = 0; for (i=0; i<100; i++) { //$$$ run 100 iterations (more iterations give a finer image) a = a * a + c; if (a.magnitude2() >1000) // squared magnitude above the 1000 threshold means the orbit diverges: not in the Julia set return 0; } return 1; // Z (a) stayed bounded, so the point is in the Julia set } /* Step 1: declare kernel as a __global__ function. Each dimension of the thread grid equals the corresponding dimension of the image, so every pixel between (0,0) and (DIM-1,DIM-1) gets its own thread block. Step 2: compute the linear offset into the output buffer ptr from the built-in variable gridDim, which is constant for all blocks and holds the grid size. In this example gridDim is (DIM,DIM), so multiplying the row index by the grid width and adding the column index gives a unique index into ptr in the range 0..DIM*DIM-1. Last comes the code that decides whether a point belongs to the Julia set. */ /* __global__ void kernel(unsigned char *ptr) { // map threadIdx/blockIdx to a pixel position int x=blockIdx.x; int y=blockIdx.y; int offset=x+y*gridDim.x; // gridDim.x is the grid width, a constant // compute the value at this position int juliaValue=julia(x,y); ptr[offset*4+0]=255*juliaValue; ptr[offset*4+1]=0; ptr[offset*4+2]=0; ptr[offset*4+3]=255; } */ void kernel( unsigned char *ptr ){ for (int y=0; y<DIM; y++) { for (int x=0; x<DIM; x++) { int offset = x + y * DIM;// linear index over all image points int juliaValue = julia( x, y ); ptr[offset*4 + 0] = 255 * juliaValue; //R ptr[offset*4 + 1] = 0; //G ptr[offset*4 + 2] = 0; //B ptr[offset*4 + 3] = 255; // alpha } } } // Use the helper library to create a bitmap of the right size and hand a pointer to its data to the kernel. int main( void ) { CPUBitmap bitmap( DIM, DIM ); // bitmap object unsigned char *ptr = bitmap.get_ptr();// buffer holding the image data kernel( ptr ); bitmap.display_and_exit(); return 0; } #endif #if 0 /* timing helper */ LARGE_INTEGER t1,t2,tc;// timing variables QueryPerformanceFrequency(&tc);// QueryPerformanceCounter(&t1);// QueryPerformanceCounter(&t2);// printf("Use Time:%f\n",(t2.QuadPart-t1.QuadPart)*1.0/tc.QuadPart);// print elapsed time #endif
2479f9eae183b63f9022167c23d8bbbf54c8314e.cu
#if 1 /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ /* The Julia set is the boundary formed by the points for which a certain complex iteration stays bounded. Over the possible parameter values the resulting boundary forms an irregular, fractal shape, one of the most interesting and beautiful objects in mathematics. The algorithm for generating a Julia set is simple: evaluate a simple iterative equation at each point of the complex plane. If the iteration diverges for a point, that point does not belong to the Julia set; if instead the sequence of values stays within some bound, the point belongs to the Julia set. The iteration is: Z(n+1)=Z(n)*Z(n)+C The GPU-based Julia set algorithm is as follows: */ #include<windows.h> #include "../common/book.h" #include "../common/cpu_bitmap.h" #define DIM 500 struct cuComplex { float r; float i; cuComplex( float a, float b ) : r(a), i(b) {} float magnitude2( void ) { return r * r + i * i; }// member function: squared magnitude cuComplex operator*(const cuComplex& a) { // member function: overloaded complex multiplication return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } }; int julia( int x, int y ) // decides whether a pixel belongs to the Julia set { const float scale = 1.5; //&scale: zoom factor (the smaller the value, the stronger the zoom) float jx = scale * (float)(DIM/2 - x)/(DIM/2); // convert pixel coordinates to complex-plane coordinates float jy = scale * (float)(DIM/2 - y)/(DIM/2); cuComplex c(-0.8, 0.156); //& the constant C of the iteration cuComplex a(jx, jy); // initial value of the variable Z in the iteration int i = 0; for (i=0; i<100; i++) { //$$$ run the iterations (more iterations give a finer image) a = a * a + c; if (a.magnitude2() >1000) // squared magnitude above the threshold 1000 means divergence: not in the Julia set return 0; } return 1; // a stayed bounded, so the point belongs to the Julia set: return 1 } /* Step 1: declare kernel as a __global__ function. Each dimension of the thread grid equals the corresponding dimension of the image, so every pixel between (0,0) and (DIM-1,DIM-1) gets its own thread block. Step 2: compute the linear offset into the output buffer ptr. The offset is computed from the built-in variable gridDim, which is constant across all blocks and holds the size of the grid's first dimension. In this example gridDim is (DIM,DIM), so multiplying the row index by the grid width and adding the column index gives the unique index into ptr, with values in the range 0..(DIM*DIM-1). Finally comes the code that decides whether a point belongs to the Julia set. */ /* __global__ void kernel(unsigned char *ptr) { // map threadIdx/blockIdx to the pixel position int x=blockIdx.x; int y=blockIdx.y; int offset=x+y*gridDim.x; // gridDim.x is the grid's first dimension, a constant // compute the value at this position int juliaValue=julia(x,y); ptr[offset*4+0]=255*juliaValue; ptr[offset*4+1]=0; ptr[offset*4+2]=0; ptr[offset*4+3]=255; } */ void kernel( unsigned char *ptr ){ for (int y=0; y<DIM; y++) { for (int x=0; x<DIM; x++) { int offset = x + y * DIM;// linear index over all image pixels int juliaValue = julia( x, y ); ptr[offset*4 + 0] = 255 * juliaValue; //R ptr[offset*4 + 1] = 0; //G ptr[offset*4 + 2] = 0; //B ptr[offset*4 + 3] = 255; // alpha } } } // Create a bitmap of the right size with the utility library and pass a pointer to its data to the kernel. int main( void ) { CPUBitmap bitmap( DIM, DIM ); // the bitmap object unsigned char *ptr = bitmap.get_ptr();// holds the copy of the image data kernel( ptr ); bitmap.display_and_exit(); return 0; } #endif #if 0 /* timing helper */ LARGE_INTEGER t1,t2,tc;// timing variables QueryPerformanceFrequency(&tc);// QueryPerformanceCounter(&t1);// ………… QueryPerformanceCounter(&t2);// printf("Use Time:%f\n",(t2.QuadPart-t1.QuadPart)*1.0/tc.QuadPart);// print the elapsed time #endif
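// The Julia example above keeps its __global__ kernel commented out and only runs the CPU loop.
// Below is a minimal, self-contained sketch of the GPU variant its comments describe (one thread
// block per pixel, a DIM x DIM grid), writing 0/1 membership flags instead of an RGBA bitmap so
// the book's helper headers are not needed. The names cuComplexDev, juliaDev, juliaKernel and the
// flags buffer are introduced here for illustration; note that in the original file julia() and
// the complex type would also need __device__ qualifiers to be callable from a kernel.
#include <cuda_runtime.h>
#include <cstdio>

#define DIM 500

struct cuComplexDev {
    float r, i;
    __device__ cuComplexDev(float a, float b) : r(a), i(b) {}
    __device__ float magnitude2() const { return r * r + i * i; }
    __device__ cuComplexDev operator*(const cuComplexDev &a) const {
        return cuComplexDev(r * a.r - i * a.i, i * a.r + r * a.i);
    }
    __device__ cuComplexDev operator+(const cuComplexDev &a) const {
        return cuComplexDev(r + a.r, i + a.i);
    }
};

__device__ int juliaDev(int x, int y) {
    const float scale = 1.5f;
    float jx = scale * (float)(DIM / 2 - x) / (DIM / 2);
    float jy = scale * (float)(DIM / 2 - y) / (DIM / 2);
    cuComplexDev c(-0.8f, 0.156f);
    cuComplexDev a(jx, jy);
    for (int i = 0; i < 100; i++) {            // same iteration count as the file above
        a = a * a + c;
        if (a.magnitude2() > 1000) return 0;   // diverged: not in the Julia set
    }
    return 1;
}

__global__ void juliaKernel(unsigned char *flags) {
    int x = blockIdx.x;                        // one block per pixel, as the comments describe
    int y = blockIdx.y;
    flags[x + y * gridDim.x] = (unsigned char)juliaDev(x, y);
}

int main() {
    static unsigned char hostFlags[DIM * DIM];
    unsigned char *devFlags = NULL;
    cudaMalloc((void **)&devFlags, DIM * DIM);

    dim3 grid(DIM, DIM);                       // grid dimensions equal the image dimensions
    juliaKernel<<<grid, 1>>>(devFlags);

    cudaMemcpy(hostFlags, devFlags, DIM * DIM, cudaMemcpyDeviceToHost);
    cudaFree(devFlags);

    int inSet = 0;
    for (int i = 0; i < DIM * DIM; i++) inSet += hostFlags[i];
    printf("pixels in the Julia set: %d of %d\n", inSet, DIM * DIM);
    return 0;
}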
90633e0954e3a34ffa63069fe2aff1c592301e4b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Opener.h" #include <iostream> using std::cout; using std::endl; extern __global__ void sumKernel( float *a, float *b, float *c ); void hnd(const hipError_t &err) { if(err != hipSuccess) cout<<"hipError_t = "<<hipGetErrorString(err)<<endl; } void print(const hipDeviceProp_t &prop) { cout<<"\n###Device Properties###"<<endl; cout<<"Name: "<<prop.name<<endl; cout<<"Capability: "<<prop.major<<"."<<prop.minor<<endl; cout<<"Clock Rate: "<<prop.clockRate<<endl; cout<<"Overlap: "<<prop.deviceOverlap<<endl; cout<<"Timeout Enabled: "<<prop.kernelExecTimeoutEnabled<<endl; cout<<"Total Global Memory: "<<prop.totalGlobalMem<<endl; cout<<"Total Constant Memory: "<<prop.totalConstMem<<endl; cout<<"Max Memory Pitch: "<<prop.memPitch<<endl; cout<<"Texture Alignment: "<<prop.textureAlignment<<endl; cout<<"MultiProcessor Count: "<<prop.multiProcessorCount<<endl; cout<<"Shared Memory per MP: "<<prop.sharedMemPerBlock<<endl; cout<<"Registers per Block: "<<prop.regsPerBlock<<endl; cout<<"Warp Size: "<<prop.warpSize<<endl; cout<<"Max Threads per Block: "<<prop.maxThreadsPerBlock<<endl; cout<<"Max Threads Dim ("<<prop.maxThreadsDim[0]<<", "<<prop.maxThreadsDim[1]<<", "<<prop.maxThreadsDim[2]<<")"<<endl; cout<<"Max Grid Size ("<<prop.maxGridSize[0]<<", "<<prop.maxGridSize[1]<<", "<<prop.maxGridSize[2]<<")"<<endl; } void properties() { cout<<"properties"<<endl; int count; hnd( hipGetDeviceCount( &count ) ); hipDeviceProp_t prop; for (int i = 0; i < count; ++i) { hnd( hipGetDeviceProperties( &prop, i ) ); print(prop); } } void funcCuda( float *v1, int N1, float *v2, int N2, float* out, int N3 ) { typedef Opener<float, hipMalloc, hipFree> FloatOpener; properties(); Timer timer; timer.start(); int N = N1 < N2 ? N1 : N2; float *v1Dev = NULL; float *v2Dev = NULL; float *outDev = NULL; FloatOpener op1(v1Dev, N); FloatOpener op2(v2Dev, N); FloatOpener op3(outDev, N); int numBytes = N*sizeof(float); hnd( hipMemcpy( v1Dev, v1, numBytes, hipMemcpyHostToDevice ) ); hnd( hipMemcpy( v2Dev, v2, numBytes, hipMemcpyHostToDevice ) ); dim3 threads = dim3(64, 1); dim3 blocks = dim3(N/threads.x, 1); hipLaunchKernelGGL(( sumKernel), dim3(blocks), dim3(threads), 0, 0, v1Dev, v2Dev, outDev ); hnd( hipMemcpy( out, outDev, numBytes, hipMemcpyDeviceToHost ) ); timer.stop(); cout<<"gpuTime = "<<timer.elapsed()<<endl; }
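// The .hip/.cu pair above and below is the clearest compact illustration in this collection of how
// hipify rewrites a triple-chevron kernel launch. As a rough sketch of the mapping (the kernel and
// argument names here are generic placeholders):
//
//   CUDA: sumKernel<<<blocks, threads, sharedBytes, stream>>>(a, b, c);
//   HIP:  hipLaunchKernelGGL(sumKernel, dim3(blocks), dim3(threads), sharedBytes, stream, a, b, c);
//
// i.e. the grid, block, dynamic shared-memory size and stream become the first four arguments of
// hipLaunchKernelGGL (defaulting to 0 when omitted in the CUDA form), followed by the kernel's own
// arguments, exactly as in the generated funcCuda above.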
90633e0954e3a34ffa63069fe2aff1c592301e4b.cu
#include "Opener.h" #include <iostream> using std::cout; using std::endl; extern __global__ void sumKernel( float *a, float *b, float *c ); void hnd(const cudaError_t &err) { if(err != cudaSuccess) cout<<"cudaError_t = "<<cudaGetErrorString(err)<<endl; } void print(const cudaDeviceProp &prop) { cout<<"\n###Device Properties###"<<endl; cout<<"Name: "<<prop.name<<endl; cout<<"Capability: "<<prop.major<<"."<<prop.minor<<endl; cout<<"Clock Rate: "<<prop.clockRate<<endl; cout<<"Overlap: "<<prop.deviceOverlap<<endl; cout<<"Timeout Enabled: "<<prop.kernelExecTimeoutEnabled<<endl; cout<<"Total Global Memory: "<<prop.totalGlobalMem<<endl; cout<<"Total Constant Memory: "<<prop.totalConstMem<<endl; cout<<"Max Memory Pitch: "<<prop.memPitch<<endl; cout<<"Texture Alignment: "<<prop.textureAlignment<<endl; cout<<"MultiProcessor Count: "<<prop.multiProcessorCount<<endl; cout<<"Shared Memory per MP: "<<prop.sharedMemPerBlock<<endl; cout<<"Registers per Block: "<<prop.regsPerBlock<<endl; cout<<"Warp Size: "<<prop.warpSize<<endl; cout<<"Max Threads per Block: "<<prop.maxThreadsPerBlock<<endl; cout<<"Max Threads Dim ("<<prop.maxThreadsDim[0]<<", "<<prop.maxThreadsDim[1]<<", "<<prop.maxThreadsDim[2]<<")"<<endl; cout<<"Max Grid Size ("<<prop.maxGridSize[0]<<", "<<prop.maxGridSize[1]<<", "<<prop.maxGridSize[2]<<")"<<endl; } void properties() { cout<<"properties"<<endl; int count; hnd( cudaGetDeviceCount( &count ) ); cudaDeviceProp prop; for (int i = 0; i < count; ++i) { hnd( cudaGetDeviceProperties( &prop, i ) ); print(prop); } } void funcCuda( float *v1, int N1, float *v2, int N2, float* out, int N3 ) { typedef Opener<float, cudaMalloc, cudaFree> FloatOpener; properties(); Timer timer; timer.start(); int N = N1 < N2 ? N1 : N2; float *v1Dev = NULL; float *v2Dev = NULL; float *outDev = NULL; FloatOpener op1(v1Dev, N); FloatOpener op2(v2Dev, N); FloatOpener op3(outDev, N); int numBytes = N*sizeof(float); hnd( cudaMemcpy( v1Dev, v1, numBytes, cudaMemcpyHostToDevice ) ); hnd( cudaMemcpy( v2Dev, v2, numBytes, cudaMemcpyHostToDevice ) ); dim3 threads = dim3(64, 1); dim3 blocks = dim3(N/threads.x, 1); sumKernel<<<blocks, threads>>>( v1Dev, v2Dev, outDev ); hnd( cudaMemcpy( out, outDev, numBytes, cudaMemcpyDeviceToHost ) ); timer.stop(); cout<<"gpuTime = "<<timer.elapsed()<<endl; }
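// In funcCuda above, blocks = dim3(N/threads.x, 1) drops the trailing partial block whenever N is
// not a multiple of 64 (and launches zero blocks for N < 64). A minimal sketch of the usual
// ceiling-division sizing; since sumKernel's definition is not shown, the stand-in below
// (addKernel/launchAdd, names introduced here) includes the bounds check such a launch needs.
#include <cuda_runtime.h>

__global__ void addKernel(const float *a, const float *b, float *c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];   // guard the padded threads of the last block
}

void launchAdd(const float *a, const float *b, float *c, int n) {
    dim3 threads(64, 1);
    dim3 blocks((n + threads.x - 1) / threads.x, 1);   // ceil(n / 64) blocks cover every element
    addKernel<<<blocks, threads>>>(a, b, c, n);
}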
937d8707925c6b787e09c3687c8acf2fff344b3f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "diff_reduce.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *dev_w = NULL; hipMalloc(&dev_w, XSIZE*YSIZE); double *feat = NULL; hipMalloc(&feat, XSIZE*YSIZE); double *pos = NULL; hipMalloc(&pos, XSIZE*YSIZE); int feat_dim = 1; int pos_dim = 1; int par0 = 1; int par1 = 1; int n_patch = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( diff_reduce), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_w,feat,pos,feat_dim,pos_dim,par0,par1,n_patch); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( diff_reduce), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_w,feat,pos,feat_dim,pos_dim,par0,par1,n_patch); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( diff_reduce), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_w,feat,pos,feat_dim,pos_dim,par0,par1,n_patch); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
937d8707925c6b787e09c3687c8acf2fff344b3f.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "diff_reduce.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *dev_w = NULL; cudaMalloc(&dev_w, XSIZE*YSIZE); double *feat = NULL; cudaMalloc(&feat, XSIZE*YSIZE); double *pos = NULL; cudaMalloc(&pos, XSIZE*YSIZE); int feat_dim = 1; int pos_dim = 1; int par0 = 1; int par1 = 1; int n_patch = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); diff_reduce<<<gridBlock,threadBlock>>>(dev_w,feat,pos,feat_dim,pos_dim,par0,par1,n_patch); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { diff_reduce<<<gridBlock,threadBlock>>>(dev_w,feat,pos,feat_dim,pos_dim,par0,par1,n_patch); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { diff_reduce<<<gridBlock,threadBlock>>>(dev_w,feat,pos,feat_dim,pos_dim,par0,par1,n_patch); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
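// The benchmark above stops its std::chrono clock right after the 1000th launch, but kernel
// launches are asynchronous, so without a synchronization the host timestamps need not cover the
// GPU execution itself. A minimal sketch of CUDA-event timing for the same loop; the diff_reduce
// arguments are abbreviated here, so this fragment is meant to be read against the loop above
// rather than compiled on its own.
cudaEvent_t evStart, evStop;
cudaEventCreate(&evStart);
cudaEventCreate(&evStop);

cudaEventRecord(evStart);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    // diff_reduce<<<gridBlock,threadBlock>>>(dev_w, feat, pos, ...);  // same launch as above
}
cudaEventRecord(evStop);
cudaEventSynchronize(evStop);            // block until all work recorded before evStop has finished

float elapsed_ms = 0.0f;
cudaEventElapsedTime(&elapsed_ms, evStart, evStop);   // GPU time between the two events, in ms
cudaEventDestroy(evStart);
cudaEventDestroy(evStop);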
87c01161d320a4e094e796f55e57763225bf8539.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorIndex.cu" #else // Check tensor dimensions for index operations, and return the slice size. // src can be nullptr in case of indexFill: in that case it is ignored. static ptrdiff_t THCTensor_(getSliceSize)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *index, THCTensor *src) { int dstDims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); int srcDims = (src == nullptr) ? dstDims : THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == 1, 4, "expecting vector of indices"); THArgCheck(dim >= 0 && dim < dstDims, 2, "Indexing dim is out of bounds"); ptrdiff_t dstSliceSize = 1; for (int d = 0; d < dstDims; d++) { if (d != dim) { dstSliceSize *= THTensor_sizeLegacyNoScalars(dst, d); } } if (src == nullptr) return dstSliceSize; THArgCheck(dim < srcDims, 3, "Indexing dim is out of bounds"); THArgCheck(THCudaLongTensor_nElement(state, index) == THTensor_sizeLegacyNoScalars(src, dim), 4, "length of src.size[dim] is not equal to length of indices"); ptrdiff_t srcSliceSize = 1; bool mismatch = false; if (dstDims != srcDims) mismatch = true; for (int d = 0; d < srcDims; d++) { if (d != dim) { srcSliceSize *= THTensor_sizeLegacyNoScalars(src, d); if (!mismatch && THTensor_sizeLegacyNoScalars(dst, d) != THTensor_sizeLegacyNoScalars(src, d)) mismatch = true; } } THArgCheck(dstSliceSize == srcSliceSize, 2, "Source/destination tensor have different slice sizes (%ld vs %ld)", dstSliceSize, srcSliceSize); if (mismatch) { static bool warningShown = false; if (!warningShown) { warningShown = true; fprintf(stderr, "Warning: source/destination slices have same size but different " "shape for an index operation. This behavior is deprecated.\n"); } } return dstSliceSize; } // Compare the stride between adjacent slices (sliceStride) with strides in the // other dimensions (i.e., strides *inside* each slice). // // - Returns true if some dimension inside the slice has lower stride than // sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim // == 0 (that is, each slice is a row). // // In this case, we choose the CUDA kernel that processes the data in // "index-major order". For example, if thread count equals slice size, then // all threads process slice #0 in lockstep, and then slice #1, and so on. // // - Otherwise (i.e., sliceStride has the lowest value), this function returns // false. The simplest example is a 2-D contiguous tensor with sliceDim == 1 // (each slice is a column). // // In this case, we choose the CUDA kernel that processes the data in // "elementInSlice-major order". For example, each thread can process element // #0 of every slice, and then element #1 of every slice, and so on. bool THCTensor_(indexShouldBeMajor)(TensorInfo<scalar_t, unsigned int> &info, int sliceDim) { // The stride between adjacent slices (e.g., between element #0 of slice #100 // and element #0 of slice #101). 
unsigned int sliceStride = info.strides[sliceDim]; for (int i = 0; i < info.dims; ++i) { if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) { return true; } } return false; } void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src); ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src); int64_t dstCopyDimSize = THCTensor_(sizeLegacyNoScalars)(state, dst, dim); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); if (sliceSize == 0) { return; } hipStream_t stream = THCState_getCurrentStream(state); int indContig = THCudaLongTensor_isContiguous(state, indices); int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ hipLaunchKernelGGL(( indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \ , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \ dstInfo, srcInfo, indicesInfo, \ dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize); #define LARGE_INDEX(TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ hipLaunchKernelGGL(( indexCopyLargeIndex<TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \ , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \ dstInfo, srcInfo, indicesInfo, \ dstCopyDim, srcCopyDim, srcTotalSize, \ (IDX_IS_MAJOR) ? 
sliceSize : numIndices, \ dstCopyDimSize); dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); TensorInfo<scalar_t, unsigned int> srcInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstCopyDim); if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false); } } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); TensorInfo<scalar_t, uint64_t> srcInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } void THCTensor_(take)(THCState *state, THCTensor *dst, THCTensor *src, THCudaLongTensor *index) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(!(THCTensor_(numel)(state, src) == 0 && THCudaLongTensor_numel(state, index) != 0), 2, "tried to take from an empty tensor"); THCTensor_(resizeNd)(state, dst, index->dim(), THTensor_getSizePtr(index), NULL); dispatchTakePut<scalar_t, 
TensorTakeOp>(state, src, dst, index); } static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) { THCThrustAllocator thrustAlloc(state); auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index)); auto src_iter = thrust::device_ptr<scalar_t>(THCTensor_(data)(state, src)); auto numel = THCTensor_(numel)(state, src); thrust::sort_by_key( thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), index_iter, index_iter + numel, src_iter, ThrustLTOp<int64_t>()); } void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); ptrdiff_t dstSize = THCTensor_(nElement)(state, dst); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index); THArgCheck(THCTensor_(nElement)(state, src) == numIndices, 3, "src should have the same number of elements as index"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); if (numIndices == 0) { return; } if (accumulate) { // wrap indices so to replace negative indices THCudaLongTensor* sorted_index = THCudaLongTensor_new(state); THCudaLongTensor_resizeAs(state, sorted_index, index); THC_pointwiseApply2<int64_t, int64_t>(state, sorted_index, index, WrapIndexOp(dstSize)); THCTensor* sorted_src = THCTensor_(newClone)(state, src); THCTensor_(sort_indices)(state, sorted_index, sorted_src); dispatchTakePut<scalar_t, TensorPutAccumulateOp>(state, dst, sorted_src, sorted_index); THCTensor_(free)(state, sorted_src); THCudaLongTensor_free(state, sorted_index); } else { dispatchTakePut<scalar_t, TensorPutOp>(state, dst, src, index); } } void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. 
ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src); ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src); int64_t dstAddDimSize = THCTensor_(sizeLegacyNoScalars)(state, dst, dim); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); if (sliceSize == 0) { return; } hipStream_t stream = THCState_getCurrentStream(state); int indContig = THCudaLongTensor_isContiguous(state, indices); int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \ , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \ dstInfo, srcInfo, indicesInfo, \ dstAddDim, srcAddDim, sliceSize, dstAddDimSize); #define LARGE_INDEX(TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \ , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \ dstInfo, srcInfo, indicesInfo, \ dstAddDim, srcAddDim, srcTotalSize, \ (IDX_IS_MAJOR) ? sliceSize : numIndices, \ dstAddDimSize); dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstAddDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstAddDim); TensorInfo<scalar_t, unsigned int> srcInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src); int srcAddDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcAddDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstAddDim); if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false); } } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstAddDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstAddDim); TensorInfo<scalar_t, uint64_t> srcInfo = getTensorInfo<scalar_t, 
THCTensor, uint64_t>(state, src); int srcAddDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcAddDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, scalar_t val) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, nullptr); ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst); int64_t dstFillDimSize = THCTensor_(sizeLegacyNoScalars)(state, dst, dim); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); if (sliceSize == 0) { return; } hipStream_t stream = THCState_getCurrentStream(state); int indContig = THCudaLongTensor_isContiguous(state, indices); int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \ hipLaunchKernelGGL(( indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM>) \ , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \ dstInfo, indicesInfo, \ dstFillDim, sliceSize, dstFillDimSize, val); #define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR) \ hipLaunchKernelGGL(( indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR>) \ , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \ dstInfo, indicesInfo, \ dstFillDim, sliceSize * numIndices, \ (IDX_IS_MAJOR) ? 
sliceSize : numIndices, \ dstFillDimSize, val); dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, -2); } else if (dstInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, -2); } else if (dstInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstFillDim); if (dstInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, -2, true); } else if (dstInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, -2, false); } } else if (dstInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THCudaLongTensor *indices) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices)); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); int srcDims = THCTensor_(nDimensionLegacyNoScalars)(state, src); hipStream_t stream = THCState_getCurrentStream(state); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, indices) <= 1, 3, "Index is supposed to be an empty tensor or a vector"); THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds"); THArgCheck(srcDims > 0, 2, "Source tensor is empty"); std::vector<int64_t> newSize = THTensor_sizesLegacyNoScalars(src); newSize[dim] = numIndices; THCTensor_(resize)(state, dst, newSize, {}); ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst); if (dstTotalSize == 0) { return; } int indContig = THCudaLongTensor_isContiguous(state, indices); // The `src` is partitioned into two parts: // -the size of 
each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. int64_t srcSelectDimSize = THCTensor_(sizeLegacyNoScalars)(state, src, dim); ptrdiff_t sliceSize = dstTotalSize / numIndices; int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \ , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \ dstInfo, srcInfo, indicesInfo, \ dstSelectDim, srcSelectDim, sliceSize, srcSelectDimSize); #define LARGE_INDEX(TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \ , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \ dstInfo, srcInfo, indicesInfo, \ dstSelectDim, srcSelectDim, dstTotalSize, \ (IDX_IS_MAJOR) ? sliceSize : numIndices, \ srcSelectDimSize); dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstSelectDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstSelectDim); TensorInfo<scalar_t, unsigned int> srcInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src); int srcSelectDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcSelectDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstSelectDim); if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false); } } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstSelectDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstSelectDim); TensorInfo<scalar_t, uint64_t> srcInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src); int srcSelectDim = 
srcInfo.collapseDims(dim); srcInfo.reduceDim(srcSelectDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } #endif
87c01161d320a4e094e796f55e57763225bf8539.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorIndex.cu" #else // Check tensor dimensions for index operations, and return the slice size. // src can be nullptr in case of indexFill: in that case it is ignored. static ptrdiff_t THCTensor_(getSliceSize)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *index, THCTensor *src) { int dstDims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); int srcDims = (src == nullptr) ? dstDims : THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == 1, 4, "expecting vector of indices"); THArgCheck(dim >= 0 && dim < dstDims, 2, "Indexing dim is out of bounds"); ptrdiff_t dstSliceSize = 1; for (int d = 0; d < dstDims; d++) { if (d != dim) { dstSliceSize *= THTensor_sizeLegacyNoScalars(dst, d); } } if (src == nullptr) return dstSliceSize; THArgCheck(dim < srcDims, 3, "Indexing dim is out of bounds"); THArgCheck(THCudaLongTensor_nElement(state, index) == THTensor_sizeLegacyNoScalars(src, dim), 4, "length of src.size[dim] is not equal to length of indices"); ptrdiff_t srcSliceSize = 1; bool mismatch = false; if (dstDims != srcDims) mismatch = true; for (int d = 0; d < srcDims; d++) { if (d != dim) { srcSliceSize *= THTensor_sizeLegacyNoScalars(src, d); if (!mismatch && THTensor_sizeLegacyNoScalars(dst, d) != THTensor_sizeLegacyNoScalars(src, d)) mismatch = true; } } THArgCheck(dstSliceSize == srcSliceSize, 2, "Source/destination tensor have different slice sizes (%ld vs %ld)", dstSliceSize, srcSliceSize); if (mismatch) { static bool warningShown = false; if (!warningShown) { warningShown = true; fprintf(stderr, "Warning: source/destination slices have same size but different " "shape for an index operation. This behavior is deprecated.\n"); } } return dstSliceSize; } // Compare the stride between adjacent slices (sliceStride) with strides in the // other dimensions (i.e., strides *inside* each slice). // // - Returns true if some dimension inside the slice has lower stride than // sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim // == 0 (that is, each slice is a row). // // In this case, we choose the CUDA kernel that processes the data in // "index-major order". For example, if thread count equals slice size, then // all threads process slice #0 in lockstep, and then slice #1, and so on. // // - Otherwise (i.e., sliceStride has the lowest value), this function returns // false. The simplest example is a 2-D contiguous tensor with sliceDim == 1 // (each slice is a column). // // In this case, we choose the CUDA kernel that processes the data in // "elementInSlice-major order". For example, each thread can process element // #0 of every slice, and then element #1 of every slice, and so on. bool THCTensor_(indexShouldBeMajor)(TensorInfo<scalar_t, unsigned int> &info, int sliceDim) { // The stride between adjacent slices (e.g., between element #0 of slice #100 // and element #0 of slice #101). 
unsigned int sliceStride = info.strides[sliceDim]; for (int i = 0; i < info.dims; ++i) { if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) { return true; } } return false; } void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src); ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src); int64_t dstCopyDimSize = THCTensor_(sizeLegacyNoScalars)(state, dst, dim); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); if (sliceSize == 0) { return; } cudaStream_t stream = THCState_getCurrentStream(state); int indContig = THCudaLongTensor_isContiguous(state, indices); int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize); #define LARGE_INDEX(TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ indexCopyLargeIndex<TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstCopyDim, srcCopyDim, srcTotalSize, \ (IDX_IS_MAJOR) ? 
sliceSize : numIndices, \ dstCopyDimSize); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); TensorInfo<scalar_t, unsigned int> srcInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstCopyDim); if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false); } } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); TensorInfo<scalar_t, uint64_t> srcInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } void THCTensor_(take)(THCState *state, THCTensor *dst, THCTensor *src, THCudaLongTensor *index) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(!(THCTensor_(numel)(state, src) == 0 && THCudaLongTensor_numel(state, index) != 0), 2, "tried to take from an empty tensor"); THCTensor_(resizeNd)(state, dst, index->dim(), THTensor_getSizePtr(index), NULL); 
dispatchTakePut<scalar_t, TensorTakeOp>(state, src, dst, index); } static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) { THCThrustAllocator thrustAlloc(state); auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index)); auto src_iter = thrust::device_ptr<scalar_t>(THCTensor_(data)(state, src)); auto numel = THCTensor_(numel)(state, src); thrust::sort_by_key( thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), index_iter, index_iter + numel, src_iter, ThrustLTOp<int64_t>()); } void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); ptrdiff_t dstSize = THCTensor_(nElement)(state, dst); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index); THArgCheck(THCTensor_(nElement)(state, src) == numIndices, 3, "src should have the same number of elements as index"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); if (numIndices == 0) { return; } if (accumulate) { // wrap indices so to replace negative indices THCudaLongTensor* sorted_index = THCudaLongTensor_new(state); THCudaLongTensor_resizeAs(state, sorted_index, index); THC_pointwiseApply2<int64_t, int64_t>(state, sorted_index, index, WrapIndexOp(dstSize)); THCTensor* sorted_src = THCTensor_(newClone)(state, src); THCTensor_(sort_indices)(state, sorted_index, sorted_src); dispatchTakePut<scalar_t, TensorPutAccumulateOp>(state, dst, sorted_src, sorted_index); THCTensor_(free)(state, sorted_src); THCudaLongTensor_free(state, sorted_index); } else { dispatchTakePut<scalar_t, TensorPutOp>(state, dst, src, index); } } void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. 
ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src); ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src); int64_t dstAddDimSize = THCTensor_(sizeLegacyNoScalars)(state, dst, dim); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); if (sliceSize == 0) { return; } cudaStream_t stream = THCState_getCurrentStream(state); int indContig = THCudaLongTensor_isContiguous(state, indices); int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ indexAddSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstAddDim, srcAddDim, sliceSize, dstAddDimSize); #define LARGE_INDEX(TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ indexAddLargeIndex<TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstAddDim, srcAddDim, srcTotalSize, \ (IDX_IS_MAJOR) ? sliceSize : numIndices, \ dstAddDimSize); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstAddDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstAddDim); TensorInfo<scalar_t, unsigned int> srcInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src); int srcAddDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcAddDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstAddDim); if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false); } } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstAddDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstAddDim); TensorInfo<scalar_t, uint64_t> srcInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src); int srcAddDim = 
srcInfo.collapseDims(dim); srcInfo.reduceDim(srcAddDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, scalar_t val) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, nullptr); ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst); int64_t dstFillDimSize = THCTensor_(sizeLegacyNoScalars)(state, dst, dim); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); if (sliceSize == 0) { return; } cudaStream_t stream = THCState_getCurrentStream(state); int indContig = THCudaLongTensor_isContiguous(state, indices); int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \ indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ dstInfo, indicesInfo, \ dstFillDim, sliceSize, dstFillDimSize, val); #define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR) \ indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ dstInfo, indicesInfo, \ dstFillDim, sliceSize * numIndices, \ (IDX_IS_MAJOR) ? 
sliceSize : numIndices, \ dstFillDimSize, val); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, -2); } else if (dstInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, -2); } else if (dstInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstFillDim); if (dstInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, -2, true); } else if (dstInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, -2, false); } } else if (dstInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THCudaLongTensor *indices) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices)); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); int srcDims = THCTensor_(nDimensionLegacyNoScalars)(state, src); cudaStream_t stream = THCState_getCurrentStream(state); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, indices) <= 1, 3, "Index is supposed to be an empty tensor or a vector"); THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds"); THArgCheck(srcDims > 0, 2, "Source tensor is empty"); std::vector<int64_t> newSize = THTensor_sizesLegacyNoScalars(src); newSize[dim] = numIndices; THCTensor_(resize)(state, dst, newSize, {}); ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst); if (dstTotalSize == 0) { return; } int indContig = THCudaLongTensor_isContiguous(state, indices); // The `src` is partitioned into two parts: // 
-the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. int64_t srcSelectDimSize = THCTensor_(sizeLegacyNoScalars)(state, src, dim); ptrdiff_t sliceSize = dstTotalSize / numIndices; int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ indexSelectSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstSelectDim, srcSelectDim, sliceSize, srcSelectDimSize); #define LARGE_INDEX(TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ indexSelectLargeIndex<TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstSelectDim, srcSelectDim, dstTotalSize, \ (IDX_IS_MAJOR) ? sliceSize : numIndices, \ srcSelectDimSize); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstSelectDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstSelectDim); TensorInfo<scalar_t, unsigned int> srcInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src); int srcSelectDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcSelectDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstSelectDim); if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false); } } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstSelectDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstSelectDim); TensorInfo<scalar_t, uint64_t> srcInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src); int srcSelectDim = srcInfo.collapseDims(dim); 
srcInfo.reduceDim(srcSelectDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } #endif
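// Illustrative sketch (not part of the THC sources above): THCTensor_(indexFill)
// and THCTensor_(indexSelect) choose between a "small index" kernel, where each
// thread walks every index for its slice elements (used when numIndices <= 16),
// and a "large index" kernel whose grid is sized by total output elements, and
// they prefer 32-bit index math whenever all tensors fit in 32-bit addressing.
// The standalone kernels below show the same dispatch idea for the simplest case
// only -- contiguous tensors with the indexed dimension outermost. The names and
// layout are assumptions for illustration, not the real THC kernels.
#include <cstdint>

template <typename T, typename IndexT>
__global__ void indexSelectSmallSketch(T* dst, const T* src, const int64_t* indices,
                                       IndexT numIndices, IndexT sliceSize) {
  // Few indices: grid-stride over one slice, every thread visits all indices.
  for (IndexT e = blockIdx.x * blockDim.x + threadIdx.x; e < sliceSize;
       e += (IndexT)gridDim.x * blockDim.x) {
    for (IndexT i = 0; i < numIndices; ++i) {
      dst[i * sliceSize + e] = src[(IndexT)indices[i] * sliceSize + e];
    }
  }
}

template <typename T, typename IndexT>
__global__ void indexSelectLargeSketch(T* dst, const T* src, const int64_t* indices,
                                       IndexT numIndices, IndexT sliceSize) {
  // Many indices: grid-stride over the whole destination, mapping each linear
  // output element back to (which index, offset within the slice).
  const IndexT total = numIndices * sliceSize;
  for (IndexT lin = blockIdx.x * blockDim.x + threadIdx.x; lin < total;
       lin += (IndexT)gridDim.x * blockDim.x) {
    const IndexT i = lin / sliceSize;
    const IndexT e = lin % sliceSize;
    dst[lin] = src[(IndexT)indices[i] * sliceSize + e];
  }
}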
measure_system.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright Carl Pearson 2020 - 2021. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE or copy at // https://www.boost.org/LICENSE_1_0.txt) #include "measure_system.hpp" #include "allocators.hpp" #include "benchmark.hpp" #include "cuda_runtime.hpp" #include "logging.hpp" #include "numeric.hpp" #include "packer_2d.hpp" #include "symbols.hpp" #include "topology.hpp" #include <mpi.h> #include <chrono> #include <iostream> typedef std::chrono::high_resolution_clock Clock; typedef std::chrono::duration<double> Duration; typedef std::chrono::time_point<Clock, Duration> Time; /*extern*/ SystemPerformance systemPerformance; void measure_system_init() { import_system_performance(systemPerformance); } static __global__ void kernel(int *a) { if (a) { *a = threadIdx.x; } } class KernelLaunchBenchmark : public Benchmark { hipStream_t stream; public: KernelLaunchBenchmark() { CUDA_RUNTIME(hipStreamCreate(&stream)); } ~KernelLaunchBenchmark() { CUDA_RUNTIME(hipStreamDestroy(stream)); } Benchmark::Sample run_iter() override { Sample res{}; Time start = Clock::now(); for (int i = 0; i < 32; ++i) { hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, stream, nullptr); } Time stop = Clock::now(); Duration dur = stop - start; CUDA_RUNTIME(hipStreamSynchronize(stream)); res.time = dur.count() / 32.0; return res; } }; /* The time to call CudaMemcpyAsync */ class CudaMemcpyAsync : public Benchmark { hipStream_t stream; public: CudaMemcpyAsync() { CUDA_RUNTIME(hipStreamCreate(&stream)); } ~CudaMemcpyAsync() { CUDA_RUNTIME(hipStreamDestroy(stream)); } Benchmark::Sample run_iter() override { Sample res{}; Time start = Clock::now(); for (int i = 0; i < 32; ++i) { hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, stream, nullptr); } Time stop = Clock::now(); Duration dur = stop - start; CUDA_RUNTIME(hipStreamSynchronize(stream)); res.time = dur.count() / 32.0; return res; } }; class CudaMemcpyAsyncD2H : public Benchmark { hipStream_t stream; char *src, *dst; size_t n_; public: CudaMemcpyAsyncD2H(size_t n) : n_(n) { CUDA_RUNTIME(hipStreamCreate(&stream)); CUDA_RUNTIME(hipMalloc(&src, n)); dst = hostAllocator.allocate(n); } ~CudaMemcpyAsyncD2H() { CUDA_RUNTIME(hipStreamDestroy(stream)); CUDA_RUNTIME(hipFree(src)); hostAllocator.deallocate(dst, n_); } void setup() { nreps_ = 1; run_iter(); // warmup Benchmark::Sample s = run_iter(); // measure time // target at least 200us nreps_ = 200.0 * 1e-6 / s.time; nreps_ = ::max(nreps_, 1); LOG_DEBUG("estimate nreps_=" << nreps_ << " for 200us"); } Benchmark::Sample run_iter() override { Sample res{}; Time start = Clock::now(); for (int i = 0; i < nreps_; ++i) { hipMemcpyAsync(dst, src, n_, hipMemcpyDeviceToHost, stream); hipStreamSynchronize(stream); } Time stop = Clock::now(); CUDA_RUNTIME(hipGetLastError()); Duration dur = stop - start; res.time = dur.count() / double(nreps_); return res; } }; class CudaMemcpyAsyncH2D : public Benchmark { hipStream_t stream; char *src, *dst; size_t n_; public: CudaMemcpyAsyncH2D(size_t n) : n_(n) { CUDA_RUNTIME(hipStreamCreate(&stream)); CUDA_RUNTIME(hipMalloc(&dst, n)); src = hostAllocator.allocate(n); // std::memset(src, -1, n); // cache host buffer } ~CudaMemcpyAsyncH2D() { CUDA_RUNTIME(hipStreamDestroy(stream)); CUDA_RUNTIME(hipFree(dst)); hostAllocator.deallocate(src, n_); } Benchmark::Sample run_iter() override { Sample res{}; Time start = Clock::now(); const int nreps = 10; for (int i = 0; i < nreps; ++i) 
{ hipMemcpyAsync(dst, src, n_, hipMemcpyHostToDevice, stream); hipStreamSynchronize(stream); } Time stop = Clock::now(); CUDA_RUNTIME(hipGetLastError()); Duration dur = stop - start; res.time = dur.count() / double(nreps); return res; } }; /* a cpu-to-cpu MPI ping-pong test between ranks 0 and 1 in TEMPI, this is used with the hostAllocator */ class CpuCpuPingpong : public MpiBenchmark { char *buf_; int n_; public: // zero buffer to put it in cache CpuCpuPingpong(size_t n, MPI_Comm comm) : buf_(nullptr), n_(n), MpiBenchmark(comm) { buf_ = hostAllocator.allocate(n); } ~CpuCpuPingpong() { hostAllocator.deallocate(buf_, n_); } Benchmark::Sample run_iter() override { int rank; MPI_Comm_rank(comm_, &rank); MPI_Barrier(comm_); Time start = Clock::now(); for (int i = 0; i < nreps_; ++i) { if (0 == rank) { libmpi.MPI_Send(buf_, n_, MPI_BYTE, 1, 0, comm_); libmpi.MPI_Recv(buf_, n_, MPI_BYTE, 1, 0, comm_, MPI_STATUS_IGNORE); } else if (1 == rank) { libmpi.MPI_Recv(buf_, n_, MPI_BYTE, 0, 0, comm_, MPI_STATUS_IGNORE); libmpi.MPI_Send(buf_, n_, MPI_BYTE, 0, 0, comm_); } } Time stop = Clock::now(); Duration dur = stop - start; double time = dur.count() / 2, maxTime; MPI_Allreduce(&time, &maxTime, 1, MPI_DOUBLE, MPI_MAX, comm_); Sample res{}; res.time = maxTime / double(nreps_); return res; } }; /* a gpu-to-gpu MPI ping-pong test */ class GpuGpuPingpong : public MpiBenchmark { char *buf_; size_t n_; public: GpuGpuPingpong(size_t n, MPI_Comm comm) : n_(n), MpiBenchmark(comm) { CUDA_RUNTIME(hipMalloc(&buf_, n)); } ~GpuGpuPingpong() { CUDA_RUNTIME(hipFree(buf_)); } Benchmark::Sample run_iter() override { int rank; MPI_Comm_rank(comm_, &rank); MPI_Barrier(comm_); Time start = Clock::now(); for (int i = 0; i < nreps_; ++i) { if (0 == rank) { libmpi.MPI_Send(buf_, n_, MPI_BYTE, 1, 0, comm_); libmpi.MPI_Recv(buf_, n_, MPI_BYTE, 1, 0, comm_, MPI_STATUS_IGNORE); } else if (1 == rank) { libmpi.MPI_Recv(buf_, n_, MPI_BYTE, 0, 0, comm_, MPI_STATUS_IGNORE); libmpi.MPI_Send(buf_, n_, MPI_BYTE, 0, 0, comm_); } } Time stop = Clock::now(); Duration dur = stop - start; double time = dur.count() / 2, maxTime; MPI_Allreduce(&time, &maxTime, 1, MPI_DOUBLE, MPI_MAX, comm_); Sample res{}; res.time = maxTime / double(nreps_); return res; } }; /* Synchronous pack, as used in MPI_Send */ class DevicePack2D : public Benchmark { char *src, *dst; int64_t numBlocks_; int64_t blockLength_; int64_t stride_; Packer2D packer_; public: DevicePack2D(int64_t numBlocks, int64_t blockLength, int64_t stride) : packer_(0, blockLength, numBlocks, stride, numBlocks * stride) { CUDA_RUNTIME(hipMalloc(&src, numBlocks * stride)); CUDA_RUNTIME(hipMalloc(&dst, numBlocks * blockLength)); } ~DevicePack2D() { CUDA_RUNTIME(hipFree(src)); CUDA_RUNTIME(hipFree(dst)); } Benchmark::Sample run_iter() override { int pos = 0; Sample res{}; double start = MPI_Wtime(); packer_.pack(dst, &pos, src, 1); res.time = MPI_Wtime() - start; return res; } }; class DeviceUnpack2D : public Benchmark { char *src, *dst; int64_t numBlocks_; int64_t blockLength_; int64_t stride_; Packer2D packer_; public: DeviceUnpack2D(int64_t numBlocks, int64_t blockLength, int64_t stride) : packer_(0, blockLength, numBlocks, stride, numBlocks * stride) { CUDA_RUNTIME(hipMalloc(&src, numBlocks * blockLength)); CUDA_RUNTIME(hipMalloc(&dst, numBlocks * stride)); } ~DeviceUnpack2D() { CUDA_RUNTIME(hipFree(src)); CUDA_RUNTIME(hipFree(dst)); } Benchmark::Sample run_iter() override { int pos = 0; Sample res{}; double start = MPI_Wtime(); packer_.unpack(src, &pos, dst, 1); res.time = MPI_Wtime() - 
start; return res; } }; /* Synchronous pack, as used in MPI_Send */ class HostPack2D : public Benchmark { char *src, *dst; int64_t numBlocks_; int64_t blockLength_; int64_t stride_; Packer2D packer_; public: HostPack2D(int64_t numBlocks, int64_t blockLength, int64_t stride) : numBlocks_(numBlocks), blockLength_(blockLength), packer_(0, blockLength, numBlocks, stride, numBlocks * stride) { CUDA_RUNTIME(hipMalloc(&src, numBlocks * stride)); dst = hostAllocator.allocate(numBlocks * blockLength); } ~HostPack2D() { CUDA_RUNTIME(hipFree(src)); hostAllocator.deallocate(dst, numBlocks_ * blockLength_); } Benchmark::Sample run_iter() override { int pos = 0; Sample res{}; double start = MPI_Wtime(); packer_.pack(dst, &pos, src, 1); res.time = MPI_Wtime() - start; return res; } }; class HostUnpack2D : public Benchmark { char *src, *dst; int64_t numBlocks_; int64_t blockLength_; int64_t stride_; Packer2D packer_; public: HostUnpack2D(int64_t numBlocks, int64_t blockLength, int64_t stride) : numBlocks_(numBlocks), blockLength_(blockLength), packer_(0, blockLength, numBlocks, stride, numBlocks * stride) { src = hostAllocator.allocate(numBlocks * blockLength); CUDA_RUNTIME(hipMalloc(&dst, numBlocks * stride)); } ~HostUnpack2D() { hostAllocator.deallocate(src, numBlocks_ * blockLength_); src = {}; CUDA_RUNTIME(hipFree(dst)); } Benchmark::Sample run_iter() override { int pos = 0; Sample res{}; double start = MPI_Wtime(); packer_.unpack(src, &pos, dst, 1); res.time = MPI_Wtime() - start; return res; } }; /* fill any missing entries in sp */ void measure_system_performance(SystemPerformance &sp, MPI_Comm comm) { using topology::node_of_rank; int rank, size; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &size); MPI_Barrier(comm); if (0 == rank) { std::cerr << "CUDA kernel\n"; std::cerr << "bytes,s,niters\n"; } if (rank == 0 && sp.cudaKernelLaunch == 0) { KernelLaunchBenchmark bm; Benchmark::Result res = bm.run(Benchmark::RunConfig()); std::cerr << "=== " << res.trimean << " " << res.nIters << " ===\n"; sp.cudaKernelLaunch = res.trimean; } MPI_Barrier(comm); if (0 == rank) { std::cerr << "D2H\n"; std::cerr << "bytes,s,niters\n"; } if (rank == 0 && sp.d2h.empty()) { for (int i = 1; i <= 8 * 1024 * 1024; i *= 2) { CudaMemcpyAsyncD2H bm(i); Benchmark::Result res = bm.run(Benchmark::RunConfig()); if (0 == rank) { std::cerr << i << "," << res.trimean << "," << res.nIters << "\n"; } sp.d2h.push_back(IidTime{.time = res.trimean, .iid = res.iid}); } } MPI_Barrier(comm); if (0 == rank) { std::cerr << "H2D\n"; std::cerr << "bytes,s,niters\n"; } if (rank == 0 && sp.h2d.empty()) { for (int i = 1; i <= 8 * 1024 * 1024; i *= 2) { CudaMemcpyAsyncH2D bm(i); Benchmark::Result res = bm.run(Benchmark::RunConfig()); if (0 == rank) { std::cerr << i << "," << res.trimean << "," << res.nIters << "\n"; } sp.h2d.push_back(IidTime{.time = res.trimean, .iid = res.iid}); } } MPI_Barrier(comm); if (0 == rank) { std::cerr << "intra-node CPU-CPU\n"; std::cerr << "bytes,s\n"; } if (size >= 2 && (node_of_rank(comm, 0) == node_of_rank(comm, 1)) && sp.intraNodeCpuCpuPingpong.empty()) { for (int i = 1; i <= 8 * 1024 * 1024; i *= 2) { CpuCpuPingpong bm(i, MPI_COMM_WORLD); Benchmark::Result res = bm.run(Benchmark::RunConfig()); if (0 == rank) { std::cerr << i << "," << res.trimean << "\n"; } sp.intraNodeCpuCpuPingpong.push_back( IidTime{.time = res.trimean, .iid = res.iid}); } } MPI_Barrier(comm); if (0 == rank) { std::cerr << "intra-node GPU-GPU\n"; std::cerr << "bytes,s\n"; } if (size >= 2 && (node_of_rank(comm, 0) == node_of_rank(comm, 1)) && 
sp.intraNodeGpuGpuPingpong.empty()) { for (int i = 1; i <= 8 * 1024 * 1024; i *= 2) { GpuGpuPingpong bm(i, MPI_COMM_WORLD); Benchmark::Result res = bm.run(Benchmark::RunConfig()); if (0 == rank) { std::cerr << i << "," << res.trimean << "\n"; } sp.intraNodeGpuGpuPingpong.push_back( IidTime{.time = res.trimean, .iid = res.iid}); } } MPI_Barrier(comm); if (0 == rank) { std::cerr << "inter-node CPU-CPU\n"; std::cerr << "bytes,s\n"; } if (size >= 2 && (node_of_rank(comm, 0) != node_of_rank(comm, 1)) && sp.interNodeCpuCpuPingpong.empty()) { for (int i = 1; i <= 8 * 1024 * 1024; i *= 2) { CpuCpuPingpong bm(i, MPI_COMM_WORLD); Benchmark::Result res = bm.run(Benchmark::RunConfig()); if (0 == rank) { std::cerr << i << "," << res.trimean << "\n"; } sp.interNodeCpuCpuPingpong.push_back( IidTime{.time = res.trimean, .iid = res.iid}); } } else { LOG_WARN("skip interNodeCpuCpuPingpong"); } MPI_Barrier(comm); if (0 == rank) { std::cerr << "inter-node GPU-GPU\n"; std::cerr << "bytes,s\n"; } if (size >= 2 && (node_of_rank(comm, 0) != node_of_rank(comm, 1)) && sp.interNodeGpuGpuPingpong.empty()) { for (int i = 1; i <= 8 * 1024 * 1024; i *= 2) { GpuGpuPingpong bm(i, MPI_COMM_WORLD); Benchmark::Result res = bm.run(Benchmark::RunConfig()); if (0 == rank) { std::cerr << i << "," << res.trimean << "\n"; } sp.interNodeGpuGpuPingpong.push_back( IidTime{.time = res.trimean, .iid = res.iid}); } } else { LOG_WARN("skip interNodeGpuGpuPingpong"); } MPI_Barrier(comm); if (0 == rank) { std::cerr << "HostPack2D\n"; std::cerr << "bytes,blockLength,s,niters\n"; } if (rank == 0 && sp.packHost.empty()) { for (int i = 0; i < 9; ++i) { sp.packHost.push_back({}); for (int j = 0; j < 9; ++j) { int64_t bytes = 1ull << (2 * i + 6); int64_t blockLength = 1ull << j; // no blocklength larger than bytes blockLength = min(bytes, blockLength); int64_t numBlocks = bytes / blockLength; HostPack2D bm(numBlocks, blockLength, 512); Benchmark::Result res = bm.run(Benchmark::RunConfig()); std::cerr << bytes << "," << blockLength << "," << res.trimean << "," << res.nIters << "\n"; sp.packHost[i].push_back(IidTime{.time = res.trimean, .iid = res.iid}); } } } MPI_Barrier(comm); if (0 == rank) { std::cerr << "HostUnpack2D\n"; std::cerr << "bytes,blockLength,s,niters\n"; } if (rank == 0 && sp.unpackHost.empty()) { for (int i = 0; i < 9; ++i) { sp.unpackHost.push_back({}); for (int j = 0; j < 9; ++j) { int64_t bytes = 1ull << (2 * i + 6); int64_t blockLength = 1ull << j; // no blocklength larger than bytes blockLength = min(bytes, blockLength); int64_t numBlocks = bytes / blockLength; HostUnpack2D bm(numBlocks, blockLength, 512); Benchmark::Result res = bm.run(Benchmark::RunConfig()); std::cerr << bytes << "," << blockLength << "," << res.trimean << "," << res.nIters << "\n"; sp.unpackHost[i].push_back( IidTime{.time = res.trimean, .iid = res.iid}); } } } MPI_Barrier(comm); if (0 == rank) { std::cerr << "DevicePack2D\n"; std::cerr << "bytes,blockLength,s,niters\n"; } if (rank == 0 && sp.packDevice.empty()) { for (int i = 0; i < 9; ++i) { sp.packDevice.push_back({}); for (int j = 0; j < 9; ++j) { int64_t bytes = 1ull << (2 * i + 6); int64_t blockLength = 1ull << j; // no blocklength larger than bytes blockLength = min(bytes, blockLength); int64_t numBlocks = bytes / blockLength; DevicePack2D bm(numBlocks, blockLength, 512); Benchmark::Result res = bm.run(Benchmark::RunConfig()); std::cerr << bytes << "," << blockLength << "," << res.trimean << "," << res.nIters << "\n"; sp.packDevice[i].push_back( IidTime{.time = res.trimean, .iid = res.iid}); ; } } 
} MPI_Barrier(comm); if (0 == rank) { std::cerr << "DeviceUnpack2D\n"; std::cerr << "bytes,blockLength,s,niters\n"; } if (rank == 0 && sp.unpackDevice.empty()) { for (int i = 0; i < 9; ++i) { sp.unpackDevice.push_back({}); for (int j = 0; j < 9; ++j) { int64_t bytes = 1ull << (2 * i + 6); int64_t blockLength = 1ull << j; // no blocklength larger than bytes blockLength = min(bytes, blockLength); int64_t numBlocks = bytes / blockLength; DeviceUnpack2D bm(numBlocks, blockLength, 512); Benchmark::Result res = bm.run(Benchmark::RunConfig()); std::cerr << bytes << "," << blockLength << "," << res.trimean << "," << res.nIters << "\n"; sp.unpackDevice[i].push_back( IidTime{.time = res.trimean, .iid = res.iid}); ; } } } }
measure_system.cu
// Copyright Carl Pearson 2020 - 2021. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE or copy at // https://www.boost.org/LICENSE_1_0.txt) #include "measure_system.hpp" #include "allocators.hpp" #include "benchmark.hpp" #include "cuda_runtime.hpp" #include "logging.hpp" #include "numeric.hpp" #include "packer_2d.hpp" #include "symbols.hpp" #include "topology.hpp" #include <mpi.h> #include <chrono> #include <iostream> typedef std::chrono::high_resolution_clock Clock; typedef std::chrono::duration<double> Duration; typedef std::chrono::time_point<Clock, Duration> Time; /*extern*/ SystemPerformance systemPerformance; void measure_system_init() { import_system_performance(systemPerformance); } static __global__ void kernel(int *a) { if (a) { *a = threadIdx.x; } } class KernelLaunchBenchmark : public Benchmark { cudaStream_t stream; public: KernelLaunchBenchmark() { CUDA_RUNTIME(cudaStreamCreate(&stream)); } ~KernelLaunchBenchmark() { CUDA_RUNTIME(cudaStreamDestroy(stream)); } Benchmark::Sample run_iter() override { Sample res{}; Time start = Clock::now(); for (int i = 0; i < 32; ++i) { kernel<<<1, 1, 0, stream>>>(nullptr); } Time stop = Clock::now(); Duration dur = stop - start; CUDA_RUNTIME(cudaStreamSynchronize(stream)); res.time = dur.count() / 32.0; return res; } }; /* The time to call CudaMemcpyAsync */ class CudaMemcpyAsync : public Benchmark { cudaStream_t stream; public: CudaMemcpyAsync() { CUDA_RUNTIME(cudaStreamCreate(&stream)); } ~CudaMemcpyAsync() { CUDA_RUNTIME(cudaStreamDestroy(stream)); } Benchmark::Sample run_iter() override { Sample res{}; Time start = Clock::now(); for (int i = 0; i < 32; ++i) { kernel<<<1, 1, 0, stream>>>(nullptr); } Time stop = Clock::now(); Duration dur = stop - start; CUDA_RUNTIME(cudaStreamSynchronize(stream)); res.time = dur.count() / 32.0; return res; } }; class CudaMemcpyAsyncD2H : public Benchmark { cudaStream_t stream; char *src, *dst; size_t n_; public: CudaMemcpyAsyncD2H(size_t n) : n_(n) { CUDA_RUNTIME(cudaStreamCreate(&stream)); CUDA_RUNTIME(cudaMalloc(&src, n)); dst = hostAllocator.allocate(n); } ~CudaMemcpyAsyncD2H() { CUDA_RUNTIME(cudaStreamDestroy(stream)); CUDA_RUNTIME(cudaFree(src)); hostAllocator.deallocate(dst, n_); } void setup() { nreps_ = 1; run_iter(); // warmup Benchmark::Sample s = run_iter(); // measure time // target at least 200us nreps_ = 200.0 * 1e-6 / s.time; nreps_ = std::max(nreps_, 1); LOG_DEBUG("estimate nreps_=" << nreps_ << " for 200us"); } Benchmark::Sample run_iter() override { Sample res{}; Time start = Clock::now(); for (int i = 0; i < nreps_; ++i) { cudaMemcpyAsync(dst, src, n_, cudaMemcpyDeviceToHost, stream); cudaStreamSynchronize(stream); } Time stop = Clock::now(); CUDA_RUNTIME(cudaGetLastError()); Duration dur = stop - start; res.time = dur.count() / double(nreps_); return res; } }; class CudaMemcpyAsyncH2D : public Benchmark { cudaStream_t stream; char *src, *dst; size_t n_; public: CudaMemcpyAsyncH2D(size_t n) : n_(n) { CUDA_RUNTIME(cudaStreamCreate(&stream)); CUDA_RUNTIME(cudaMalloc(&dst, n)); src = hostAllocator.allocate(n); // std::memset(src, -1, n); // cache host buffer } ~CudaMemcpyAsyncH2D() { CUDA_RUNTIME(cudaStreamDestroy(stream)); CUDA_RUNTIME(cudaFree(dst)); hostAllocator.deallocate(src, n_); } Benchmark::Sample run_iter() override { Sample res{}; Time start = Clock::now(); const int nreps = 10; for (int i = 0; i < nreps; ++i) { cudaMemcpyAsync(dst, src, n_, cudaMemcpyHostToDevice, stream); cudaStreamSynchronize(stream); } Time stop = Clock::now(); 
CUDA_RUNTIME(cudaGetLastError()); Duration dur = stop - start; res.time = dur.count() / double(nreps); return res; } }; /* a cpu-to-cpu MPI ping-pong test between ranks 0 and 1 in TEMPI, this is used with the hostAllocator */ class CpuCpuPingpong : public MpiBenchmark { char *buf_; int n_; public: // zero buffer to put it in cache CpuCpuPingpong(size_t n, MPI_Comm comm) : buf_(nullptr), n_(n), MpiBenchmark(comm) { buf_ = hostAllocator.allocate(n); } ~CpuCpuPingpong() { hostAllocator.deallocate(buf_, n_); } Benchmark::Sample run_iter() override { int rank; MPI_Comm_rank(comm_, &rank); MPI_Barrier(comm_); Time start = Clock::now(); for (int i = 0; i < nreps_; ++i) { if (0 == rank) { libmpi.MPI_Send(buf_, n_, MPI_BYTE, 1, 0, comm_); libmpi.MPI_Recv(buf_, n_, MPI_BYTE, 1, 0, comm_, MPI_STATUS_IGNORE); } else if (1 == rank) { libmpi.MPI_Recv(buf_, n_, MPI_BYTE, 0, 0, comm_, MPI_STATUS_IGNORE); libmpi.MPI_Send(buf_, n_, MPI_BYTE, 0, 0, comm_); } } Time stop = Clock::now(); Duration dur = stop - start; double time = dur.count() / 2, maxTime; MPI_Allreduce(&time, &maxTime, 1, MPI_DOUBLE, MPI_MAX, comm_); Sample res{}; res.time = maxTime / double(nreps_); return res; } }; /* a gpu-to-gpu MPI ping-pong test */ class GpuGpuPingpong : public MpiBenchmark { char *buf_; size_t n_; public: GpuGpuPingpong(size_t n, MPI_Comm comm) : n_(n), MpiBenchmark(comm) { CUDA_RUNTIME(cudaMalloc(&buf_, n)); } ~GpuGpuPingpong() { CUDA_RUNTIME(cudaFree(buf_)); } Benchmark::Sample run_iter() override { int rank; MPI_Comm_rank(comm_, &rank); MPI_Barrier(comm_); Time start = Clock::now(); for (int i = 0; i < nreps_; ++i) { if (0 == rank) { libmpi.MPI_Send(buf_, n_, MPI_BYTE, 1, 0, comm_); libmpi.MPI_Recv(buf_, n_, MPI_BYTE, 1, 0, comm_, MPI_STATUS_IGNORE); } else if (1 == rank) { libmpi.MPI_Recv(buf_, n_, MPI_BYTE, 0, 0, comm_, MPI_STATUS_IGNORE); libmpi.MPI_Send(buf_, n_, MPI_BYTE, 0, 0, comm_); } } Time stop = Clock::now(); Duration dur = stop - start; double time = dur.count() / 2, maxTime; MPI_Allreduce(&time, &maxTime, 1, MPI_DOUBLE, MPI_MAX, comm_); Sample res{}; res.time = maxTime / double(nreps_); return res; } }; /* Synchronous pack, as used in MPI_Send */ class DevicePack2D : public Benchmark { char *src, *dst; int64_t numBlocks_; int64_t blockLength_; int64_t stride_; Packer2D packer_; public: DevicePack2D(int64_t numBlocks, int64_t blockLength, int64_t stride) : packer_(0, blockLength, numBlocks, stride, numBlocks * stride) { CUDA_RUNTIME(cudaMalloc(&src, numBlocks * stride)); CUDA_RUNTIME(cudaMalloc(&dst, numBlocks * blockLength)); } ~DevicePack2D() { CUDA_RUNTIME(cudaFree(src)); CUDA_RUNTIME(cudaFree(dst)); } Benchmark::Sample run_iter() override { int pos = 0; Sample res{}; double start = MPI_Wtime(); packer_.pack(dst, &pos, src, 1); res.time = MPI_Wtime() - start; return res; } }; class DeviceUnpack2D : public Benchmark { char *src, *dst; int64_t numBlocks_; int64_t blockLength_; int64_t stride_; Packer2D packer_; public: DeviceUnpack2D(int64_t numBlocks, int64_t blockLength, int64_t stride) : packer_(0, blockLength, numBlocks, stride, numBlocks * stride) { CUDA_RUNTIME(cudaMalloc(&src, numBlocks * blockLength)); CUDA_RUNTIME(cudaMalloc(&dst, numBlocks * stride)); } ~DeviceUnpack2D() { CUDA_RUNTIME(cudaFree(src)); CUDA_RUNTIME(cudaFree(dst)); } Benchmark::Sample run_iter() override { int pos = 0; Sample res{}; double start = MPI_Wtime(); packer_.unpack(src, &pos, dst, 1); res.time = MPI_Wtime() - start; return res; } }; /* Synchronous pack, as used in MPI_Send */ class HostPack2D : public Benchmark { char 
*src, *dst; int64_t numBlocks_; int64_t blockLength_; int64_t stride_; Packer2D packer_; public: HostPack2D(int64_t numBlocks, int64_t blockLength, int64_t stride) : numBlocks_(numBlocks), blockLength_(blockLength), packer_(0, blockLength, numBlocks, stride, numBlocks * stride) { CUDA_RUNTIME(cudaMalloc(&src, numBlocks * stride)); dst = hostAllocator.allocate(numBlocks * blockLength); } ~HostPack2D() { CUDA_RUNTIME(cudaFree(src)); hostAllocator.deallocate(dst, numBlocks_ * blockLength_); } Benchmark::Sample run_iter() override { int pos = 0; Sample res{}; double start = MPI_Wtime(); packer_.pack(dst, &pos, src, 1); res.time = MPI_Wtime() - start; return res; } }; class HostUnpack2D : public Benchmark { char *src, *dst; int64_t numBlocks_; int64_t blockLength_; int64_t stride_; Packer2D packer_; public: HostUnpack2D(int64_t numBlocks, int64_t blockLength, int64_t stride) : numBlocks_(numBlocks), blockLength_(blockLength), packer_(0, blockLength, numBlocks, stride, numBlocks * stride) { src = hostAllocator.allocate(numBlocks * blockLength); CUDA_RUNTIME(cudaMalloc(&dst, numBlocks * stride)); } ~HostUnpack2D() { hostAllocator.deallocate(src, numBlocks_ * blockLength_); src = {}; CUDA_RUNTIME(cudaFree(dst)); } Benchmark::Sample run_iter() override { int pos = 0; Sample res{}; double start = MPI_Wtime(); packer_.unpack(src, &pos, dst, 1); res.time = MPI_Wtime() - start; return res; } }; /* fill any missing entries in sp */ void measure_system_performance(SystemPerformance &sp, MPI_Comm comm) { using topology::node_of_rank; int rank, size; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &size); MPI_Barrier(comm); if (0 == rank) { std::cerr << "CUDA kernel\n"; std::cerr << "bytes,s,niters\n"; } if (rank == 0 && sp.cudaKernelLaunch == 0) { KernelLaunchBenchmark bm; Benchmark::Result res = bm.run(Benchmark::RunConfig()); std::cerr << "=== " << res.trimean << " " << res.nIters << " ===\n"; sp.cudaKernelLaunch = res.trimean; } MPI_Barrier(comm); if (0 == rank) { std::cerr << "D2H\n"; std::cerr << "bytes,s,niters\n"; } if (rank == 0 && sp.d2h.empty()) { for (int i = 1; i <= 8 * 1024 * 1024; i *= 2) { CudaMemcpyAsyncD2H bm(i); Benchmark::Result res = bm.run(Benchmark::RunConfig()); if (0 == rank) { std::cerr << i << "," << res.trimean << "," << res.nIters << "\n"; } sp.d2h.push_back(IidTime{.time = res.trimean, .iid = res.iid}); } } MPI_Barrier(comm); if (0 == rank) { std::cerr << "H2D\n"; std::cerr << "bytes,s,niters\n"; } if (rank == 0 && sp.h2d.empty()) { for (int i = 1; i <= 8 * 1024 * 1024; i *= 2) { CudaMemcpyAsyncH2D bm(i); Benchmark::Result res = bm.run(Benchmark::RunConfig()); if (0 == rank) { std::cerr << i << "," << res.trimean << "," << res.nIters << "\n"; } sp.h2d.push_back(IidTime{.time = res.trimean, .iid = res.iid}); } } MPI_Barrier(comm); if (0 == rank) { std::cerr << "intra-node CPU-CPU\n"; std::cerr << "bytes,s\n"; } if (size >= 2 && (node_of_rank(comm, 0) == node_of_rank(comm, 1)) && sp.intraNodeCpuCpuPingpong.empty()) { for (int i = 1; i <= 8 * 1024 * 1024; i *= 2) { CpuCpuPingpong bm(i, MPI_COMM_WORLD); Benchmark::Result res = bm.run(Benchmark::RunConfig()); if (0 == rank) { std::cerr << i << "," << res.trimean << "\n"; } sp.intraNodeCpuCpuPingpong.push_back( IidTime{.time = res.trimean, .iid = res.iid}); } } MPI_Barrier(comm); if (0 == rank) { std::cerr << "intra-node GPU-GPU\n"; std::cerr << "bytes,s\n"; } if (size >= 2 && (node_of_rank(comm, 0) == node_of_rank(comm, 1)) && sp.intraNodeGpuGpuPingpong.empty()) { for (int i = 1; i <= 8 * 1024 * 1024; i *= 2) { GpuGpuPingpong bm(i, 
MPI_COMM_WORLD); Benchmark::Result res = bm.run(Benchmark::RunConfig()); if (0 == rank) { std::cerr << i << "," << res.trimean << "\n"; } sp.intraNodeGpuGpuPingpong.push_back( IidTime{.time = res.trimean, .iid = res.iid}); } } MPI_Barrier(comm); if (0 == rank) { std::cerr << "inter-node CPU-CPU\n"; std::cerr << "bytes,s\n"; } if (size >= 2 && (node_of_rank(comm, 0) != node_of_rank(comm, 1)) && sp.interNodeCpuCpuPingpong.empty()) { for (int i = 1; i <= 8 * 1024 * 1024; i *= 2) { CpuCpuPingpong bm(i, MPI_COMM_WORLD); Benchmark::Result res = bm.run(Benchmark::RunConfig()); if (0 == rank) { std::cerr << i << "," << res.trimean << "\n"; } sp.interNodeCpuCpuPingpong.push_back( IidTime{.time = res.trimean, .iid = res.iid}); } } else { LOG_WARN("skip interNodeCpuCpuPingpong"); } MPI_Barrier(comm); if (0 == rank) { std::cerr << "inter-node GPU-GPU\n"; std::cerr << "bytes,s\n"; } if (size >= 2 && (node_of_rank(comm, 0) != node_of_rank(comm, 1)) && sp.interNodeGpuGpuPingpong.empty()) { for (int i = 1; i <= 8 * 1024 * 1024; i *= 2) { GpuGpuPingpong bm(i, MPI_COMM_WORLD); Benchmark::Result res = bm.run(Benchmark::RunConfig()); if (0 == rank) { std::cerr << i << "," << res.trimean << "\n"; } sp.interNodeGpuGpuPingpong.push_back( IidTime{.time = res.trimean, .iid = res.iid}); } } else { LOG_WARN("skip interNodeGpuGpuPingpong"); } MPI_Barrier(comm); if (0 == rank) { std::cerr << "HostPack2D\n"; std::cerr << "bytes,blockLength,s,niters\n"; } if (rank == 0 && sp.packHost.empty()) { for (int i = 0; i < 9; ++i) { sp.packHost.push_back({}); for (int j = 0; j < 9; ++j) { int64_t bytes = 1ull << (2 * i + 6); int64_t blockLength = 1ull << j; // no blocklength larger than bytes blockLength = min(bytes, blockLength); int64_t numBlocks = bytes / blockLength; HostPack2D bm(numBlocks, blockLength, 512); Benchmark::Result res = bm.run(Benchmark::RunConfig()); std::cerr << bytes << "," << blockLength << "," << res.trimean << "," << res.nIters << "\n"; sp.packHost[i].push_back(IidTime{.time = res.trimean, .iid = res.iid}); } } } MPI_Barrier(comm); if (0 == rank) { std::cerr << "HostUnpack2D\n"; std::cerr << "bytes,blockLength,s,niters\n"; } if (rank == 0 && sp.unpackHost.empty()) { for (int i = 0; i < 9; ++i) { sp.unpackHost.push_back({}); for (int j = 0; j < 9; ++j) { int64_t bytes = 1ull << (2 * i + 6); int64_t blockLength = 1ull << j; // no blocklength larger than bytes blockLength = min(bytes, blockLength); int64_t numBlocks = bytes / blockLength; HostUnpack2D bm(numBlocks, blockLength, 512); Benchmark::Result res = bm.run(Benchmark::RunConfig()); std::cerr << bytes << "," << blockLength << "," << res.trimean << "," << res.nIters << "\n"; sp.unpackHost[i].push_back( IidTime{.time = res.trimean, .iid = res.iid}); } } } MPI_Barrier(comm); if (0 == rank) { std::cerr << "DevicePack2D\n"; std::cerr << "bytes,blockLength,s,niters\n"; } if (rank == 0 && sp.packDevice.empty()) { for (int i = 0; i < 9; ++i) { sp.packDevice.push_back({}); for (int j = 0; j < 9; ++j) { int64_t bytes = 1ull << (2 * i + 6); int64_t blockLength = 1ull << j; // no blocklength larger than bytes blockLength = min(bytes, blockLength); int64_t numBlocks = bytes / blockLength; DevicePack2D bm(numBlocks, blockLength, 512); Benchmark::Result res = bm.run(Benchmark::RunConfig()); std::cerr << bytes << "," << blockLength << "," << res.trimean << "," << res.nIters << "\n"; sp.packDevice[i].push_back( IidTime{.time = res.trimean, .iid = res.iid}); ; } } } MPI_Barrier(comm); if (0 == rank) { std::cerr << "DeviceUnpack2D\n"; std::cerr << 
"bytes,blockLength,s,niters\n"; } if (rank == 0 && sp.unpackDevice.empty()) { for (int i = 0; i < 9; ++i) { sp.unpackDevice.push_back({}); for (int j = 0; j < 9; ++j) { int64_t bytes = 1ull << (2 * i + 6); int64_t blockLength = 1ull << j; // no blocklength larger than bytes blockLength = min(bytes, blockLength); int64_t numBlocks = bytes / blockLength; DeviceUnpack2D bm(numBlocks, blockLength, 512); Benchmark::Result res = bm.run(Benchmark::RunConfig()); std::cerr << bytes << "," << blockLength << "," << res.trimean << "," << res.nIters << "\n"; sp.unpackDevice[i].push_back( IidTime{.time = res.trimean, .iid = res.iid}); ; } } } }
34b376f15eba6a722f299c015b5510641d25d502.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <algorithm>
#include <stdio.h>
#include <fstream>
#include <stdlib.h>
#include <malloc.h>
using namespace std;

#define REPEAT 1
#define Real unsigned int
#define STRIDE 1
#define CACHELINE 8
#define ALLIGNMENT 64
#define BLOCK 16

// Defined in a separate translation unit. The original declaration listed a
// fifth parameter (int* xi) that the launch below never supplies and typed A
// as Real**; the signature here is adjusted (an assumption) to match the launch.
__global__ void VecAdd(Real* A, int* N, unsigned long long* d_time, int* xj);

int main(int argc, char* argv[])
{
    if (argc != 2) {
        std::cout << "Wrong number of arguments!! Exiting program !!!";
        return 0;
    }

    int N = atoi(argv[1]);

    Real *h_A, *d_A;   // host and device copies of the strided index chain
    int *d_N;
    std::ofstream fp;
    fp.open("/home/hpc/ihpc/ihpc002h/gpu-exp/Master-thesis/exp2/data/result.txt", std::ofstream::app);
    unsigned long long *d_time, *h_time;
    int *xj;           // device-side sink, matching the kernel's int* parameter

    // N+2 elements on both sides so the sentinel written at h_A[N] is copied too.
    h_A = (Real*)memalign(ALLIGNMENT, (N + 2) * sizeof(Real));
    h_time = (unsigned long long*)memalign(ALLIGNMENT, N * sizeof(unsigned long long) / BLOCK);
    hipMalloc(&d_A, (N + 2) * sizeof(Real));
    hipMalloc(&d_time, N * sizeof(unsigned long long) / BLOCK);
    hipMalloc(&xj, sizeof(int));
    hipMalloc(&d_N, sizeof(int));

    // Build the index chain: A[i] holds the next index to visit.
    for (int i = 0; i < N; i++) {
        h_A[i] = (i + STRIDE) % N;
    }
    h_A[N] = 0;

    hipMemcpy(d_A, h_A, (N + 2) * sizeof(Real), hipMemcpyHostToDevice);
    hipMemcpy(d_N, &N, sizeof(int), hipMemcpyHostToDevice);

    // Single thread, single block: this measures access latency, not bandwidth.
    hipLaunchKernelGGL(VecAdd, dim3(1), dim3(1), 0, 0, d_A, d_N, d_time, xj);

    hipMemcpy(h_time, d_time, N * sizeof(unsigned long long) / BLOCK, hipMemcpyDeviceToHost);
    hipDeviceSynchronize();

    // One timing sample per BLOCK accesses.
    for (int i = 0; i < N / BLOCK; i++) {
        fp << (i + 1) * BLOCK << " " << *(h_time + i) << std::endl;
    }

    hipFree(d_A);
    hipFree(d_time);
    hipFree(xj);
    hipFree(d_N);
    free(h_A);
    free(h_time);
    fp.close();
}
34b376f15eba6a722f299c015b5510641d25d502.cu
#include <iostream>
#include <algorithm>
#include <stdio.h>
#include <fstream>
#include <stdlib.h>
#include <malloc.h>
using namespace std;

#define REPEAT 1
#define Real unsigned int
#define STRIDE 1
#define CACHELINE 8
#define ALLIGNMENT 64
#define BLOCK 16

// Defined in a separate translation unit. The original declaration listed a
// fifth parameter (int* xi) that the launch below never supplies and typed A
// as Real**; the signature here is adjusted (an assumption) to match the launch.
__global__ void VecAdd(Real* A, int* N, unsigned long long* d_time, int* xj);

int main(int argc, char* argv[])
{
    if (argc != 2) {
        std::cout << "Wrong number of arguments!! Exiting program !!!";
        return 0;
    }

    int N = atoi(argv[1]);

    Real *h_A, *d_A;   // host and device copies of the strided index chain
    int *d_N;
    std::ofstream fp;
    fp.open("/home/hpc/ihpc/ihpc002h/gpu-exp/Master-thesis/exp2/data/result.txt", std::ofstream::app);
    unsigned long long *d_time, *h_time;
    int *xj;           // device-side sink, matching the kernel's int* parameter

    // N+2 elements on both sides so the sentinel written at h_A[N] is copied too.
    h_A = (Real*)memalign(ALLIGNMENT, (N + 2) * sizeof(Real));
    h_time = (unsigned long long*)memalign(ALLIGNMENT, N * sizeof(unsigned long long) / BLOCK);
    cudaMalloc(&d_A, (N + 2) * sizeof(Real));
    cudaMalloc(&d_time, N * sizeof(unsigned long long) / BLOCK);
    cudaMalloc(&xj, sizeof(int));
    cudaMalloc(&d_N, sizeof(int));

    // Build the index chain: A[i] holds the next index to visit.
    for (int i = 0; i < N; i++) {
        h_A[i] = (i + STRIDE) % N;
    }
    h_A[N] = 0;

    cudaMemcpy(d_A, h_A, (N + 2) * sizeof(Real), cudaMemcpyHostToDevice);
    cudaMemcpy(d_N, &N, sizeof(int), cudaMemcpyHostToDevice);

    // Single thread, single block: this measures access latency, not bandwidth.
    VecAdd<<<1, 1>>>(d_A, d_N, d_time, xj);

    cudaMemcpy(h_time, d_time, N * sizeof(unsigned long long) / BLOCK, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    // One timing sample per BLOCK accesses.
    for (int i = 0; i < N / BLOCK; i++) {
        fp << (i + 1) * BLOCK << " " << *(h_time + i) << std::endl;
    }

    cudaFree(d_A);
    cudaFree(d_time);
    cudaFree(xj);
    cudaFree(d_N);
    free(h_A);
    free(h_time);
    fp.close();
}
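// The VecAdd kernel used by the pair of files above is only declared there; its
// definition lives elsewhere in that repository. As a hedged sketch of what a
// single-thread pointer-chasing timing kernel of this shape typically looks
// like (an assumption for orientation, not the missing definition):
__global__ void chaseAndTimeSketch(const unsigned int* A, const int* N,
                                   unsigned long long* d_time, int* xj) {
  const int n = *N;
  unsigned int j = 0;
  const unsigned long long start = clock64();
  for (int i = 0; i < n; ++i) {
    j = A[j];                                // dependent load: follow the chain
    if ((i + 1) % 16 == 0) {                 // 16 == BLOCK in the host code
      d_time[i / 16] = clock64() - start;    // cumulative clocks after (i+1) loads
    }
  }
  *xj = (int)j;                              // keep the chain from being optimized away
}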
668b62d5aa132f23807c2f2b7c16af68ff15f794.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "../DEM2DForceGPU.cu" typedef DEMEvaluator<Scalar, Scalar4, WCAPotential<Scalar, Scalar4, NoFriction<Scalar> > > WCADEM; template hipError_t gpu_compute_dem2d_forces<Scalar, Scalar2, Scalar4, WCADEM>( Scalar4* d_force, Scalar4* d_torque, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const unsigned int n_ghosts, const Scalar4 *d_pos, const Scalar4 *d_quat, const Scalar2 *d_vertices, const unsigned int *d_num_shape_verts, const Scalar* d_diam, const Scalar4 *d_velocity, const unsigned int vertexCount, const BoxDim& box, const unsigned int *d_n_neigh, const unsigned int *d_nlist, const unsigned int *d_head_list, const WCADEM potential, const Scalar r_cutsq, const unsigned int n_shapes, const unsigned int particlesPerBlock, const unsigned int maxVerts);
668b62d5aa132f23807c2f2b7c16af68ff15f794.cu
// Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "../DEM2DForceGPU.cu" typedef DEMEvaluator<Scalar, Scalar4, WCAPotential<Scalar, Scalar4, NoFriction<Scalar> > > WCADEM; template cudaError_t gpu_compute_dem2d_forces<Scalar, Scalar2, Scalar4, WCADEM>( Scalar4* d_force, Scalar4* d_torque, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const unsigned int n_ghosts, const Scalar4 *d_pos, const Scalar4 *d_quat, const Scalar2 *d_vertices, const unsigned int *d_num_shape_verts, const Scalar* d_diam, const Scalar4 *d_velocity, const unsigned int vertexCount, const BoxDim& box, const unsigned int *d_n_neigh, const unsigned int *d_nlist, const unsigned int *d_head_list, const WCADEM potential, const Scalar r_cutsq, const unsigned int n_shapes, const unsigned int particlesPerBlock, const unsigned int maxVerts);
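// The pair above contains no kernel code at all: HOOMD-blue keeps the templated
// kernel in DEM2DForceGPU.cu and gives each potential its own translation unit
// that does nothing but explicitly instantiate the launcher, so each heavy
// template instantiation compiles separately and in parallel. A minimal,
// self-contained sketch of that pattern (all names here are made up):
#include <cuda_runtime.h>

struct WCASketch {                       // stand-in for WCAPotential<...>
  float scale;
  __device__ float4 operator()(float4 p) const {
    return make_float4(scale * p.x, scale * p.y, 0.f, 0.f);
  }
};

template <typename Potential>
__global__ void forceKernelSketch(float4* force, const float4* pos, int n, Potential pot) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) force[i] = pot(pos[i]);
}

template <typename Potential>
cudaError_t launchForceSketch(float4* force, const float4* pos, int n, Potential pot) {
  forceKernelSketch<<<(n + 255) / 256, 256>>>(force, pos, n, pot);
  return cudaGetLastError();
}

// This single line is the whole point of a file like 668b62d5...cu:
// emit the concrete kernel and launcher for one potential type.
template cudaError_t launchForceSketch<WCASketch>(float4*, const float4*, int, WCASketch);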
a8045c22020921a19c5dc3f75507b830c6e00f8e.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <hip/hip_runtime.h> #include "macros.h" #include "cuda_utils.h" __global__ void grid_interp_cuda_kernel( const torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> vol, const torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> points, torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> output, int channels, int3 nGrids, size_t size) { const int tx = blockIdx.x * blockDim.x + threadIdx.x; const int ty = blockIdx.y * blockDim.y + threadIdx.y; const int tz = blockIdx.z * blockDim.z + threadIdx.z; const int index = (tz * blockDim.y * gridDim.y + ty) * blockDim.x * gridDim.x + tx; if (index >= size) { return; } const float x = points[index][0]; const float y = points[index][1]; const float z = points[index][2]; const int ix = (int)x; const int iy = (int)y; const int iz = (int)z; const float fx = x - ix; const float fy = y - iy; const float fz = z - iz; for (int c = 0; c < channels; c++) { const int x0 = max(0, min(ix, nGrids.x - 1)); const int x1 = max(0, min(ix + 1, nGrids.x - 1)); const int y0 = max(0, min(iy, nGrids.y - 1)); const int y1 = max(0, min(iy + 1, nGrids.y - 1)); const int z0 = max(0, min(iz, nGrids.z - 1)); const int z1 = max(0, min(iz + 1, nGrids.z - 1)); const float v00 = (1.0 - fx) * vol[c][z0][y0][x0] + fx * vol[c][z0][y0][x1]; const float v01 = (1.0 - fx) * vol[c][z0][y1][x0] + fx * vol[c][z0][y1][x1]; const float v10 = (1.0 - fx) * vol[c][z1][y0][x0] + fx * vol[c][z1][y0][x1]; const float v11 = (1.0 - fx) * vol[c][z1][y1][x0] + fx * vol[c][z1][y1][x1]; const float v0 = (1.0 - fy) * v00 + fy * v01; const float v1 = (1.0 - fy) * v10 + fy * v11; output[index][c] = (1.0 - fz) * v0 + fz * v1; } } torch::Tensor grid_interp_cuda(torch::Tensor vol, torch::Tensor points) { // Check input tensors CHECK_CUDA(vol); CHECK_CONTIGUOUS(vol); CHECK_IS_FLOAT(vol); CHECK_N_DIM(vol, 4); CHECK_CUDA(points); CHECK_CONTIGUOUS(points); CHECK_IS_FLOAT(vol); CHECK_N_DIM(points, 2); // Size parameters const int Nx = vol.size(3); const int Ny = vol.size(2); const int Nz = vol.size(1); const int C = vol.size(0); const int Np = points.size(0); const int deviceId = vol.device().index(); torch::Tensor output = torch::zeros({Np, C}, torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, deviceId)); auto volAscr = vol.packed_accessor32<float, 4, torch::RestrictPtrTraits>(); auto ptsAscr = points.packed_accessor32<float, 2, torch::RestrictPtrTraits>(); auto outAscr = output.packed_accessor32<float, 2, torch::RestrictPtrTraits>(); const uint32_t MAX_THREADS_AXIS = 128; const uint32_t MAX_THREADS_AXIS2 = MAX_THREADS_AXIS * MAX_THREADS_AXIS; const uint32_t blockx = MAX_THREADS_AXIS; const uint32_t blocky = MAX_THREADS_AXIS; const uint32_t blockz = (Np + MAX_THREADS_AXIS2 - 1) / MAX_THREADS_AXIS2; const uint32_t BLOCK_SIZE = 8; const uint32_t gridx = (blockx + BLOCK_SIZE - 1) / BLOCK_SIZE; const uint32_t gridy = (blocky + BLOCK_SIZE - 1) / BLOCK_SIZE; const uint32_t gridz = (blockz + BLOCK_SIZE - 1) / BLOCK_SIZE; const int3 nGrids = make_int3(Nx, Ny, Nz); const dim3 blocks = { gridx, gridy, gridz }; const dim3 threads = { BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE }; const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( grid_interp_cuda_kernel), dim3(blocks), dim3(threads), 0, stream, volAscr, ptsAscr, outAscr, C, nGrids, Np); CUDA_CHECK_ERRORS(); return output; }
a8045c22020921a19c5dc3f75507b830c6e00f8e.cu
#include <torch/extension.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <cuda_runtime.h> #include "macros.h" #include "cuda_utils.h" __global__ void grid_interp_cuda_kernel( const torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> vol, const torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> points, torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> output, int channels, int3 nGrids, size_t size) { const int tx = blockIdx.x * blockDim.x + threadIdx.x; const int ty = blockIdx.y * blockDim.y + threadIdx.y; const int tz = blockIdx.z * blockDim.z + threadIdx.z; const int index = (tz * blockDim.y * gridDim.y + ty) * blockDim.x * gridDim.x + tx; if (index >= size) { return; } const float x = points[index][0]; const float y = points[index][1]; const float z = points[index][2]; const int ix = (int)x; const int iy = (int)y; const int iz = (int)z; const float fx = x - ix; const float fy = y - iy; const float fz = z - iz; for (int c = 0; c < channels; c++) { const int x0 = max(0, min(ix, nGrids.x - 1)); const int x1 = max(0, min(ix + 1, nGrids.x - 1)); const int y0 = max(0, min(iy, nGrids.y - 1)); const int y1 = max(0, min(iy + 1, nGrids.y - 1)); const int z0 = max(0, min(iz, nGrids.z - 1)); const int z1 = max(0, min(iz + 1, nGrids.z - 1)); const float v00 = (1.0 - fx) * vol[c][z0][y0][x0] + fx * vol[c][z0][y0][x1]; const float v01 = (1.0 - fx) * vol[c][z0][y1][x0] + fx * vol[c][z0][y1][x1]; const float v10 = (1.0 - fx) * vol[c][z1][y0][x0] + fx * vol[c][z1][y0][x1]; const float v11 = (1.0 - fx) * vol[c][z1][y1][x0] + fx * vol[c][z1][y1][x1]; const float v0 = (1.0 - fy) * v00 + fy * v01; const float v1 = (1.0 - fy) * v10 + fy * v11; output[index][c] = (1.0 - fz) * v0 + fz * v1; } } torch::Tensor grid_interp_cuda(torch::Tensor vol, torch::Tensor points) { // Check input tensors CHECK_CUDA(vol); CHECK_CONTIGUOUS(vol); CHECK_IS_FLOAT(vol); CHECK_N_DIM(vol, 4); CHECK_CUDA(points); CHECK_CONTIGUOUS(points); CHECK_IS_FLOAT(vol); CHECK_N_DIM(points, 2); // Size parameters const int Nx = vol.size(3); const int Ny = vol.size(2); const int Nz = vol.size(1); const int C = vol.size(0); const int Np = points.size(0); const int deviceId = vol.device().index(); torch::Tensor output = torch::zeros({Np, C}, torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, deviceId)); auto volAscr = vol.packed_accessor32<float, 4, torch::RestrictPtrTraits>(); auto ptsAscr = points.packed_accessor32<float, 2, torch::RestrictPtrTraits>(); auto outAscr = output.packed_accessor32<float, 2, torch::RestrictPtrTraits>(); const uint32_t MAX_THREADS_AXIS = 128; const uint32_t MAX_THREADS_AXIS2 = MAX_THREADS_AXIS * MAX_THREADS_AXIS; const uint32_t blockx = MAX_THREADS_AXIS; const uint32_t blocky = MAX_THREADS_AXIS; const uint32_t blockz = (Np + MAX_THREADS_AXIS2 - 1) / MAX_THREADS_AXIS2; const uint32_t BLOCK_SIZE = 8; const uint32_t gridx = (blockx + BLOCK_SIZE - 1) / BLOCK_SIZE; const uint32_t gridy = (blocky + BLOCK_SIZE - 1) / BLOCK_SIZE; const uint32_t gridz = (blockz + BLOCK_SIZE - 1) / BLOCK_SIZE; const int3 nGrids = make_int3(Nx, Ny, Nz); const dim3 blocks = { gridx, gridy, gridz }; const dim3 threads = { BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE }; const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); grid_interp_cuda_kernel<<<blocks, threads, 0, stream>>>(volAscr, ptsAscr, outAscr, C, nGrids, Np); CUDA_CHECK_ERRORS(); return output; }
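// grid_interp_cuda_kernel above performs plain trilinear interpolation with
// clamped borders over a [C][Nz][Ny][Nx] volume. A host-side reference of the
// same arithmetic is handy for unit-testing the kernel; the flat row-major
// layout below mirrors the packed accessor, and the function name is just an
// illustrative choice.
#include <algorithm>

float gridInterpRef(const float* vol, int Nx, int Ny, int Nz,
                    int c, float x, float y, float z) {
  const int ix = (int)x, iy = (int)y, iz = (int)z;
  const float fx = x - ix, fy = y - iy, fz = z - iz;
  auto clampi = [](int v, int hi) { return std::max(0, std::min(v, hi - 1)); };
  auto at = [&](int xx, int yy, int zz) {
    return vol[(((size_t)c * Nz + zz) * Ny + yy) * Nx + xx];
  };
  const int x0 = clampi(ix, Nx), x1 = clampi(ix + 1, Nx);
  const int y0 = clampi(iy, Ny), y1 = clampi(iy + 1, Ny);
  const int z0 = clampi(iz, Nz), z1 = clampi(iz + 1, Nz);
  // Interpolate along x on the four edges, then along y, then along z.
  const float v00 = (1.f - fx) * at(x0, y0, z0) + fx * at(x1, y0, z0);
  const float v01 = (1.f - fx) * at(x0, y1, z0) + fx * at(x1, y1, z0);
  const float v10 = (1.f - fx) * at(x0, y0, z1) + fx * at(x1, y0, z1);
  const float v11 = (1.f - fx) * at(x0, y1, z1) + fx * at(x1, y1, z1);
  const float v0 = (1.f - fy) * v00 + fy * v01;
  const float v1 = (1.f - fy) * v10 + fy * v11;
  return (1.f - fz) * v0 + fz * v1;
}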
99c8bc0354466c8851c97456f998c9a095650778.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // __constant__ int opDat1_res_stride_OP2CONSTANT; int opDat1_res_stride_OP2HOST=-1; __constant__ int direct_res_stride_OP2CONSTANT; int direct_res_stride_OP2HOST=-1; //user function __device__ void res_gpu( const double *A, const float *u, float *du, const float *beta) { *du += (float)((*beta) * (A[(0)*direct_res_stride_OP2CONSTANT]) * (u[(0)*opDat1_res_stride_OP2CONSTANT])); } // CUDA kernel function __global__ void op_cuda_res( const float *__restrict ind_arg0, float *__restrict ind_arg1, const int *__restrict opDat1Map, const double *__restrict arg0, const float *arg3, int start, int end, int set_size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid + start < end) { int n = tid + start; //initialise local variables float arg2_l[3]; for ( int d=0; d<3; d++ ){ arg2_l[d] = ZERO_float; } int map1idx; int map2idx; map1idx = opDat1Map[n + set_size * 1]; map2idx = opDat1Map[n + set_size * 0]; //user-supplied kernel call res_gpu(arg0+n, ind_arg0+map1idx, arg2_l, arg3); atomicAdd(&ind_arg1[0*opDat1_res_stride_OP2CONSTANT+map2idx],arg2_l[0]); atomicAdd(&ind_arg1[1*opDat1_res_stride_OP2CONSTANT+map2idx],arg2_l[1]); atomicAdd(&ind_arg1[2*opDat1_res_stride_OP2CONSTANT+map2idx],arg2_l[2]); } } //host stub function void op_par_loop_res(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3){ float*arg3h = (float *)arg3.data; int nargs = 4; op_arg args[4]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(0); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[0].name = name; OP_kernels[0].count += 1; int ninds = 2; int inds[4] = {-1,0,1,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: res\n"); } int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { //transfer constants to GPU int consts_bytes = 0; consts_bytes += ROUND_UP(1*sizeof(float)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg3.data = OP_consts_h + consts_bytes; arg3.data_d = OP_consts_d + consts_bytes; for ( int d=0; d<1; d++ ){ ((float *)arg3.data)[d] = arg3h[d]; } consts_bytes += ROUND_UP(1*sizeof(float)); mvConstArraysToDevice(consts_bytes); if ((OP_kernels[0].count==1) || (opDat1_res_stride_OP2HOST != getSetSizeFromOpArg(&arg1))) { opDat1_res_stride_OP2HOST = getSetSizeFromOpArg(&arg1); hipMemcpyToSymbol(opDat1_res_stride_OP2CONSTANT, &opDat1_res_stride_OP2HOST,sizeof(int)); } if ((OP_kernels[0].count==1) || (direct_res_stride_OP2HOST != getSetSizeFromOpArg(&arg0))) { direct_res_stride_OP2HOST = getSetSizeFromOpArg(&arg0); hipMemcpyToSymbol(direct_res_stride_OP2CONSTANT,&direct_res_stride_OP2HOST,sizeof(int)); } //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_0 int nthread = OP_BLOCK_SIZE_0; #else int nthread = OP_block_size; #endif for ( int round=0; round<2; round++ ){ if (round==1) { op_mpi_wait_all_grouped(nargs, args, 2); } int start = round==0 ? 0 : set->core_size; int end = round==0 ? 
set->core_size : set->size + set->exec_size; if (end-start>0) { int nblocks = (end-start-1)/nthread+1; hipLaunchKernelGGL(( op_cuda_res), dim3(nblocks),dim3(nthread), 0, 0, (float *)arg1.data_d, (float *)arg2.data_d, arg1.map_data_d, (double*)arg0.data_d, (float*)arg3.data_d, start,end,set->size+set->exec_size); } } } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[0].time += wall_t2 - wall_t1; }
99c8bc0354466c8851c97456f998c9a095650778.cu
// // auto-generated by op2.py // __constant__ int opDat1_res_stride_OP2CONSTANT; int opDat1_res_stride_OP2HOST=-1; __constant__ int direct_res_stride_OP2CONSTANT; int direct_res_stride_OP2HOST=-1; //user function __device__ void res_gpu( const double *A, const float *u, float *du, const float *beta) { *du += (float)((*beta) * (A[(0)*direct_res_stride_OP2CONSTANT]) * (u[(0)*opDat1_res_stride_OP2CONSTANT])); } // CUDA kernel function __global__ void op_cuda_res( const float *__restrict ind_arg0, float *__restrict ind_arg1, const int *__restrict opDat1Map, const double *__restrict arg0, const float *arg3, int start, int end, int set_size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid + start < end) { int n = tid + start; //initialise local variables float arg2_l[3]; for ( int d=0; d<3; d++ ){ arg2_l[d] = ZERO_float; } int map1idx; int map2idx; map1idx = opDat1Map[n + set_size * 1]; map2idx = opDat1Map[n + set_size * 0]; //user-supplied kernel call res_gpu(arg0+n, ind_arg0+map1idx, arg2_l, arg3); atomicAdd(&ind_arg1[0*opDat1_res_stride_OP2CONSTANT+map2idx],arg2_l[0]); atomicAdd(&ind_arg1[1*opDat1_res_stride_OP2CONSTANT+map2idx],arg2_l[1]); atomicAdd(&ind_arg1[2*opDat1_res_stride_OP2CONSTANT+map2idx],arg2_l[2]); } } //host stub function void op_par_loop_res(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3){ float*arg3h = (float *)arg3.data; int nargs = 4; op_arg args[4]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(0); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[0].name = name; OP_kernels[0].count += 1; int ninds = 2; int inds[4] = {-1,0,1,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: res\n"); } int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { //transfer constants to GPU int consts_bytes = 0; consts_bytes += ROUND_UP(1*sizeof(float)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg3.data = OP_consts_h + consts_bytes; arg3.data_d = OP_consts_d + consts_bytes; for ( int d=0; d<1; d++ ){ ((float *)arg3.data)[d] = arg3h[d]; } consts_bytes += ROUND_UP(1*sizeof(float)); mvConstArraysToDevice(consts_bytes); if ((OP_kernels[0].count==1) || (opDat1_res_stride_OP2HOST != getSetSizeFromOpArg(&arg1))) { opDat1_res_stride_OP2HOST = getSetSizeFromOpArg(&arg1); cudaMemcpyToSymbol(opDat1_res_stride_OP2CONSTANT, &opDat1_res_stride_OP2HOST,sizeof(int)); } if ((OP_kernels[0].count==1) || (direct_res_stride_OP2HOST != getSetSizeFromOpArg(&arg0))) { direct_res_stride_OP2HOST = getSetSizeFromOpArg(&arg0); cudaMemcpyToSymbol(direct_res_stride_OP2CONSTANT,&direct_res_stride_OP2HOST,sizeof(int)); } //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_0 int nthread = OP_BLOCK_SIZE_0; #else int nthread = OP_block_size; #endif for ( int round=0; round<2; round++ ){ if (round==1) { op_mpi_wait_all_grouped(nargs, args, 2); } int start = round==0 ? 0 : set->core_size; int end = round==0 ? set->core_size : set->size + set->exec_size; if (end-start>0) { int nblocks = (end-start-1)/nthread+1; op_cuda_res<<<nblocks,nthread>>>( (float *)arg1.data_d, (float *)arg2.data_d, arg1.map_data_d, (double*)arg0.data_d, (float*)arg3.data_d, start,end,set->size+set->exec_size); } } } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[0].time += wall_t2 - wall_t1; }
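// The generated op_cuda_res kernel above reads its data in struct-of-arrays
// form: component d of set element e lives at d * stride + e, where the stride
// (the set size) is copied into __constant__ memory whenever it changes, and
// the indirect increment is applied with one atomicAdd per component. A hedged
// reconstruction of the sequential user kernel op2.py started from -- an
// assumption based on stripping the strides out of res_gpu -- plus the
// AoS-to-SoA index mapping, for orientation:
//
//   void res(const double *A, const float *u, float *du, const float *beta) {
//     *du += (float)((*beta) * (*A) * (*u));
//   }
//
__device__ inline float soaLoadSketch(const float* dat, int component, int element,
                                      int stride /* == set size */) {
  // AoS: dat[element * dim + component]  ->  SoA: dat[component * stride + element]
  return dat[component * stride + element];
}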
fb9c356e05af21ed2fd45ab87a67f2effad13359.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <op_boilerplate.h> #include <loops/random.h> #include <dll.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <helpers/DebugHelper.h> using namespace randomOps; template <typename T, typename OpClass> static inline __device__ void randomSingleGeneric( Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments) { functions::random::RandomFunction<T>::template execTransformCuda<OpClass>( state, z, zShapeBuffer, extraArguments); } template <typename T, typename OpClass> static inline __device__ void randomDoubleGeneric( Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments) { functions::random::RandomFunction<T>::template execTransformCuda<OpClass>( state, x, xShapeBuffer, z, zShapeBuffer, extraArguments); } template <typename T, typename OpClass> static inline __device__ void randomTripleGeneric( Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments) { functions::random::RandomFunction<T>::template execTransformCuda<OpClass>( state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments); } #ifndef __CLION_IDE__ // here we generate kernels for target operations DISPATCH_KERNEL_SIMPLE(randomSingle_, randomSingleGeneric, float, INPUT(Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomSingle_, randomSingleGeneric, double, INPUT(Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomSingle_, randomSingleGeneric, float16, INPUT(Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomSingle_, randomSingleGeneric, bfloat16, INPUT(Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomDouble_, randomDoubleGeneric, float, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomDouble_, randomDoubleGeneric, double, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomDouble_, 
randomDoubleGeneric, float16, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomDouble_, randomDoubleGeneric, bfloat16, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomTriple_, randomTripleGeneric, float, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomTriple_, randomTripleGeneric, double, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomTriple_, randomTripleGeneric, float16, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomTriple_, randomTripleGeneric, bfloat16, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) #endif namespace functions { namespace random { template<typename T> template<typename OpClass> void _CUDA_D RandomFunction<T>::execTransformCuda(Nd4jPointer state, void *vx, Nd4jLong *xShapeBuffer, void *vy, Nd4jLong *yShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<T*>(vx); auto y = reinterpret_cast<T*>(vy); auto z = reinterpret_cast<T*>(vz); auto extraArguments = reinterpret_cast<T*>(vextraArguments); if (OpClass::requiresSpecial) { OpClass::specialOpCuda(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments); return; } else { __shared__ Nd4jLong length; __shared__ int xEWS; __shared__ int yEWS; __shared__ int zEWS; __shared__ nd4j::graph::RandomGenerator *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; nd4j::graph::RandomGenerator *devBuffer; if (threadIdx.x == 0) { length = shape::length(zShapeBuffer); xEWS = shape::elementWiseStride(xShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); extern __shared__ unsigned char shmem[]; buffer = (nd4j::graph::RandomGenerator *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::graph::RandomGenerator *> (state); dB = reinterpret_cast<unsigned char *> (state); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::graph::RandomGenerator); e += blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; if (xEWS >= 1 && yEWS >= 1 && zEWS >= 1) { for (Nd4jLong e = tid; e < length; e += blockDim.x * gridDim.x) { z[e * zEWS] = OpClass::op(x[e * xEWS], y[e * yEWS], e, length, buffer, extraArguments); } } else { for (Nd4jLong i = tid; i < length; i += blockDim.x * gridDim.x) { auto xOffset2 = 
shape::getIndexOffset(i, xShapeBuffer, length); auto yOffset2 = shape::getIndexOffset(i, yShapeBuffer, length); auto zOffset2 = shape::getIndexOffset(i, zShapeBuffer, length); z[zOffset2] = OpClass::op(x[xOffset2], y[yOffset2], i, length, buffer, extraArguments); } } } }; template<typename T> template<typename OpClass> void _CUDA_D RandomFunction<T>::execTransformCuda(Nd4jPointer state, void *vx, Nd4jLong *xShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<T*>(vx); auto z = reinterpret_cast<T*>(vz); auto extraArguments = reinterpret_cast<T*>(vextraArguments); __shared__ Nd4jLong length; __shared__ int xEWS; __shared__ int zEWS; __shared__ nd4j::graph::RandomGenerator *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::graph::RandomGenerator *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::graph::RandomGenerator *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::graph::RandomGenerator *> (state); dB = reinterpret_cast<unsigned char *> (state); length = shape::length(zShapeBuffer); xEWS = shape::elementWiseStride(xShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::graph::RandomGenerator); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); if (xEWS >= 1 && zEWS >= 1) { for (Nd4jLong e = blockIdx.x * blockDim.x + threadIdx.x; e < length; e += blockDim.x * gridDim.x) { z[e * zEWS] = OpClass::op(x[e * xEWS], e, length, buffer, extraArguments); } } else { for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < length; i += blockDim.x * gridDim.x) { auto xOffset2 = shape::getIndexOffset(i, xShapeBuffer, length); auto zOffset2 = shape::getIndexOffset(i, zShapeBuffer, length); z[zOffset2] = OpClass::op(x[xOffset2], i, length, buffer, extraArguments); } } } template<typename T> template<typename OpClass> void _CUDA_D RandomFunction<T>::execTransformCuda(Nd4jPointer state, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto z = reinterpret_cast<T*>(vz); auto extraArguments = reinterpret_cast<T*>(vextraArguments); Nd4jLong length = shape::length(zShapeBuffer); int ews = shape::elementWiseStride(zShapeBuffer); __shared__ nd4j::graph::RandomGenerator *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::graph::RandomGenerator *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::graph::RandomGenerator *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::graph::RandomGenerator *> (state); dB = reinterpret_cast<unsigned char *> (state); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::graph::RandomGenerator); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; if (ews >= 1) { for (Nd4jLong x = tid; x < length; x += blockDim.x * gridDim.x) { z[x * ews] = OpClass::op(x, length, buffer, extraArguments); } } else { for (Nd4jLong i = tid; i < length; i += blockDim.x * gridDim.x) { auto zOffset2 = shape::getIndexOffset(i, zShapeBuffer, length); z[zOffset2] = OpClass::op(i, length, buffer, extraArguments); } } } template <> _CUDA_H void RandomFunction<float>::executeCudaSingle(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto z = reinterpret_cast<float*>(vz); auto extraArguments = 
reinterpret_cast<float*>(vextraArguments); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomSingle, float, PARAMS(stateHost, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<float16>::executeCudaSingle(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto z = reinterpret_cast<float16*>(vz); auto extraArguments = reinterpret_cast<float16*>(vextraArguments); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomSingle, float16, PARAMS(stateHost, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<bfloat16>::executeCudaSingle(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto z = reinterpret_cast<bfloat16*>(vz); auto extraArguments = reinterpret_cast<bfloat16*>(vextraArguments); auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomSingle, bfloat16, PARAMS(stateHost, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<double>::executeCudaSingle(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto z = reinterpret_cast<double*>(vz); auto extraArguments = reinterpret_cast<double*>(vextraArguments); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomSingle, double, PARAMS(stateHost, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<float>::executeCudaDouble(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<float*>(vx); auto z = reinterpret_cast<float*>(vz); auto extraArguments = reinterpret_cast<float*>(vextraArguments); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomDouble, float, PARAMS(stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<float16>::executeCudaDouble(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<float16*>(vx); auto z = reinterpret_cast<float16*>(vz); auto extraArguments = reinterpret_cast<float16*>(vextraArguments); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomDouble, float16, PARAMS(stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<bfloat16>::executeCudaDouble(dim3& launchDims, 
Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<bfloat16*>(vx); auto z = reinterpret_cast<bfloat16*>(vz); auto extraArguments = reinterpret_cast<bfloat16*>(vextraArguments); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomDouble, bfloat16, PARAMS(stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<double>::executeCudaDouble(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<double*>(vx); auto z = reinterpret_cast<double*>(vz); auto extraArguments = reinterpret_cast<double*>(vextraArguments); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomDouble, double, PARAMS(stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<float>::executeCudaTriple(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vy, Nd4jLong *yShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<float*>(vx); auto y = reinterpret_cast<float*>(vy); auto z = reinterpret_cast<float*>(vz); auto extraArguments = reinterpret_cast<float*>(vextraArguments); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomTriple, float, PARAMS(stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<float16>::executeCudaTriple(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vy, Nd4jLong *yShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<float16*>(vx); auto y = reinterpret_cast<float16*>(vy); auto z = reinterpret_cast<float16*>(vz); auto extraArguments = reinterpret_cast<float16*>(vextraArguments); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomTriple, float16, PARAMS(stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<bfloat16>::executeCudaTriple(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vy, Nd4jLong *yShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<bfloat16*>(vx); auto y = reinterpret_cast<bfloat16*>(vy); auto z = reinterpret_cast<bfloat16*>(vz); auto extraArguments = reinterpret_cast<bfloat16*>(vextraArguments); auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomTriple, bfloat16, PARAMS(stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, 
extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<double>::executeCudaTriple(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vy, Nd4jLong *yShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<double*>(vx); auto y = reinterpret_cast<double*>(vy); auto z = reinterpret_cast<double*>(vz); auto extraArguments = reinterpret_cast<double*>(vextraArguments); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomTriple, double, PARAMS(stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT RandomFunction, , FLOAT_TYPES); } }
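The executeCudaSingle/Double/Triple wrappers in the file above do not launch kernels directly: DISPATCH_KERNEL_SIMPLE generates one __global__ kernel per (type, random op) pair, and DISPATCH_SIMPLE expands to an if/else selector over opNum that launches the matching specialization on the given stream. The snippet below is only a hand-written illustration of that shape, not the real macro expansion; the kernel name, op functors, and launch configuration are invented for the example.

#include <cuda_runtime.h>

// Invented stand-ins for two entries of the RANDOM_OPS list.
struct UniformLikeOp  { static __device__ float op(long e) { return (e & 1) ? 0.25f : 0.75f; } };
struct GaussianLikeOp { static __device__ float op(long e) { return static_cast<float>(e % 3) - 1.0f; } };

// One kernel specialization per op, comparable in spirit to what DISPATCH_KERNEL_SIMPLE emits.
template <typename OpClass>
__global__ void randomSingleSketch(float* z, long length) {
    for (long e = blockIdx.x * blockDim.x + threadIdx.x; e < length; e += (long)blockDim.x * gridDim.x)
        z[e] = OpClass::op(e);
}

// The opNum-based selector that DISPATCH_SIMPLE builds inside a host wrapper such as executeCudaSingle.
void launchRandomSingleSketch(int opNum, float* z, long length, cudaStream_t stream) {
    dim3 grid(512), block(256);
    switch (opNum) {
        case 0: randomSingleSketch<UniformLikeOp><<<grid, block, 0, stream>>>(z, length); break;
        case 1: randomSingleSketch<GaussianLikeOp><<<grid, block, 0, stream>>>(z, length); break;
        default: break;  // unknown opNum: nothing is launched
    }
}

In the real code the generated kernels additionally receive the generator state, shape buffers and extra arguments, and the per-type instantiations (float, float16, bfloat16, double) come from the DISPATCH_KERNEL_SIMPLE declarations above.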
fb9c356e05af21ed2fd45ab87a67f2effad13359.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <op_boilerplate.h> #include <loops/random.h> #include <dll.h> #include <cuda.h> #include <cuda_runtime.h> #include <helpers/DebugHelper.h> using namespace randomOps; template <typename T, typename OpClass> static inline __device__ void randomSingleGeneric( Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments) { functions::random::RandomFunction<T>::template execTransformCuda<OpClass>( state, z, zShapeBuffer, extraArguments); } template <typename T, typename OpClass> static inline __device__ void randomDoubleGeneric( Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments) { functions::random::RandomFunction<T>::template execTransformCuda<OpClass>( state, x, xShapeBuffer, z, zShapeBuffer, extraArguments); } template <typename T, typename OpClass> static inline __device__ void randomTripleGeneric( Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments) { functions::random::RandomFunction<T>::template execTransformCuda<OpClass>( state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments); } #ifndef __CLION_IDE__ // here we generate kernels for target operations DISPATCH_KERNEL_SIMPLE(randomSingle_, randomSingleGeneric, float, INPUT(Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomSingle_, randomSingleGeneric, double, INPUT(Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomSingle_, randomSingleGeneric, float16, INPUT(Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomSingle_, randomSingleGeneric, bfloat16, INPUT(Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomDouble_, randomDoubleGeneric, float, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomDouble_, randomDoubleGeneric, double, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomDouble_, randomDoubleGeneric, float16, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, 
void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomDouble_, randomDoubleGeneric, bfloat16, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomTriple_, randomTripleGeneric, float, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomTriple_, randomTripleGeneric, double, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomTriple_, randomTripleGeneric, float16, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DISPATCH_KERNEL_SIMPLE(randomTriple_, randomTripleGeneric, bfloat16, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) #endif namespace functions { namespace random { template<typename T> template<typename OpClass> void _CUDA_D RandomFunction<T>::execTransformCuda(Nd4jPointer state, void *vx, Nd4jLong *xShapeBuffer, void *vy, Nd4jLong *yShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<T*>(vx); auto y = reinterpret_cast<T*>(vy); auto z = reinterpret_cast<T*>(vz); auto extraArguments = reinterpret_cast<T*>(vextraArguments); if (OpClass::requiresSpecial) { OpClass::specialOpCuda(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments); return; } else { __shared__ Nd4jLong length; __shared__ int xEWS; __shared__ int yEWS; __shared__ int zEWS; __shared__ nd4j::graph::RandomGenerator *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; nd4j::graph::RandomGenerator *devBuffer; if (threadIdx.x == 0) { length = shape::length(zShapeBuffer); xEWS = shape::elementWiseStride(xShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); extern __shared__ unsigned char shmem[]; buffer = (nd4j::graph::RandomGenerator *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::graph::RandomGenerator *> (state); dB = reinterpret_cast<unsigned char *> (state); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::graph::RandomGenerator); e += blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; if (xEWS >= 1 && yEWS >= 1 && zEWS >= 1) { for (Nd4jLong e = tid; e < length; e += blockDim.x * gridDim.x) { z[e * zEWS] = OpClass::op(x[e * xEWS], y[e * yEWS], e, length, buffer, extraArguments); } } else { for (Nd4jLong i = tid; i < length; i += blockDim.x * gridDim.x) { auto xOffset2 = shape::getIndexOffset(i, xShapeBuffer, length); auto yOffset2 = shape::getIndexOffset(i, 
yShapeBuffer, length); auto zOffset2 = shape::getIndexOffset(i, zShapeBuffer, length); z[zOffset2] = OpClass::op(x[xOffset2], y[yOffset2], i, length, buffer, extraArguments); } } } }; template<typename T> template<typename OpClass> void _CUDA_D RandomFunction<T>::execTransformCuda(Nd4jPointer state, void *vx, Nd4jLong *xShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<T*>(vx); auto z = reinterpret_cast<T*>(vz); auto extraArguments = reinterpret_cast<T*>(vextraArguments); __shared__ Nd4jLong length; __shared__ int xEWS; __shared__ int zEWS; __shared__ nd4j::graph::RandomGenerator *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::graph::RandomGenerator *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::graph::RandomGenerator *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::graph::RandomGenerator *> (state); dB = reinterpret_cast<unsigned char *> (state); length = shape::length(zShapeBuffer); xEWS = shape::elementWiseStride(xShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::graph::RandomGenerator); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); if (xEWS >= 1 && zEWS >= 1) { for (Nd4jLong e = blockIdx.x * blockDim.x + threadIdx.x; e < length; e += blockDim.x * gridDim.x) { z[e * zEWS] = OpClass::op(x[e * xEWS], e, length, buffer, extraArguments); } } else { for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < length; i += blockDim.x * gridDim.x) { auto xOffset2 = shape::getIndexOffset(i, xShapeBuffer, length); auto zOffset2 = shape::getIndexOffset(i, zShapeBuffer, length); z[zOffset2] = OpClass::op(x[xOffset2], i, length, buffer, extraArguments); } } } template<typename T> template<typename OpClass> void _CUDA_D RandomFunction<T>::execTransformCuda(Nd4jPointer state, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto z = reinterpret_cast<T*>(vz); auto extraArguments = reinterpret_cast<T*>(vextraArguments); Nd4jLong length = shape::length(zShapeBuffer); int ews = shape::elementWiseStride(zShapeBuffer); __shared__ nd4j::graph::RandomGenerator *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::graph::RandomGenerator *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::graph::RandomGenerator *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::graph::RandomGenerator *> (state); dB = reinterpret_cast<unsigned char *> (state); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::graph::RandomGenerator); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; if (ews >= 1) { for (Nd4jLong x = tid; x < length; x += blockDim.x * gridDim.x) { z[x * ews] = OpClass::op(x, length, buffer, extraArguments); } } else { for (Nd4jLong i = tid; i < length; i += blockDim.x * gridDim.x) { auto zOffset2 = shape::getIndexOffset(i, zShapeBuffer, length); z[zOffset2] = OpClass::op(i, length, buffer, extraArguments); } } } template <> _CUDA_H void RandomFunction<float>::executeCudaSingle(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto z = reinterpret_cast<float*>(vz); auto extraArguments = reinterpret_cast<float*>(vextraArguments); cudaStream_t *stream = reinterpret_cast<cudaStream_t 
*>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomSingle, float, PARAMS(stateHost, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<float16>::executeCudaSingle(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto z = reinterpret_cast<float16*>(vz); auto extraArguments = reinterpret_cast<float16*>(vextraArguments); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomSingle, float16, PARAMS(stateHost, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<bfloat16>::executeCudaSingle(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto z = reinterpret_cast<bfloat16*>(vz); auto extraArguments = reinterpret_cast<bfloat16*>(vextraArguments); auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomSingle, bfloat16, PARAMS(stateHost, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<double>::executeCudaSingle(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto z = reinterpret_cast<double*>(vz); auto extraArguments = reinterpret_cast<double*>(vextraArguments); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomSingle, double, PARAMS(stateHost, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<float>::executeCudaDouble(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<float*>(vx); auto z = reinterpret_cast<float*>(vz); auto extraArguments = reinterpret_cast<float*>(vextraArguments); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomDouble, float, PARAMS(stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<float16>::executeCudaDouble(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<float16*>(vx); auto z = reinterpret_cast<float16*>(vz); auto extraArguments = reinterpret_cast<float16*>(vextraArguments); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomDouble, float16, PARAMS(stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<bfloat16>::executeCudaDouble(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong 
*xShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<bfloat16*>(vx); auto z = reinterpret_cast<bfloat16*>(vz); auto extraArguments = reinterpret_cast<bfloat16*>(vextraArguments); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomDouble, bfloat16, PARAMS(stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<double>::executeCudaDouble(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<double*>(vx); auto z = reinterpret_cast<double*>(vz); auto extraArguments = reinterpret_cast<double*>(vextraArguments); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomDouble, double, PARAMS(stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<float>::executeCudaTriple(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vy, Nd4jLong *yShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<float*>(vx); auto y = reinterpret_cast<float*>(vy); auto z = reinterpret_cast<float*>(vz); auto extraArguments = reinterpret_cast<float*>(vextraArguments); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomTriple, float, PARAMS(stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<float16>::executeCudaTriple(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vy, Nd4jLong *yShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<float16*>(vx); auto y = reinterpret_cast<float16*>(vy); auto z = reinterpret_cast<float16*>(vz); auto extraArguments = reinterpret_cast<float16*>(vextraArguments); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomTriple, float16, PARAMS(stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void RandomFunction<bfloat16>::executeCudaTriple(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vy, Nd4jLong *yShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<bfloat16*>(vx); auto y = reinterpret_cast<bfloat16*>(vy); auto z = reinterpret_cast<bfloat16*>(vz); auto extraArguments = reinterpret_cast<bfloat16*>(vextraArguments); auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomTriple, bfloat16, PARAMS(stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> 
_CUDA_H void RandomFunction<double>::executeCudaTriple(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vy, Nd4jLong *yShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) { auto x = reinterpret_cast<double*>(vx); auto y = reinterpret_cast<double*>(vy); auto z = reinterpret_cast<double*>(vz); auto extraArguments = reinterpret_cast<double*>(vextraArguments); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomTriple, double, PARAMS(stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) DEBUG_KERNEL(stream, opNum); } BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT RandomFunction, , FLOAT_TYPES); } }
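All three execTransformCuda overloads above follow the same device-side pattern: thread 0 caches the length and element-wise strides, the whole block then copies the nd4j::graph::RandomGenerator state byte by byte from global memory into dynamic shared memory (the "using this loop instead of memcpy" loops), and a grid-stride loop applies the op, taking the stride fast path when all element-wise strides are >= 1 and shape-based offsets otherwise. Below is a stripped-down, self-contained sketch of that pattern; FakeGenerator and fakeOp are stand-ins invented for the example, not the library's types.

#include <cuda_runtime.h>

// FakeGenerator stands in for nd4j::graph::RandomGenerator: just a blob of state bytes.
struct FakeGenerator { unsigned long long seed; };

// Trivial stand-in for OpClass::op(...): every element reads the shared-memory state.
__device__ float fakeOp(long idx, const FakeGenerator* gen) {
    return static_cast<float>((idx * 2654435761ull + gen->seed) & 0xffff) / 65535.0f;
}

__global__ void patternSketch(const FakeGenerator* devState, float* z, long length, int zEWS) {
    extern __shared__ unsigned char shmem[];
    FakeGenerator* buffer = reinterpret_cast<FakeGenerator*>(shmem);

    // Byte-wise cooperative copy of the generator state into shared memory,
    // mirroring the "using this loop instead of memcpy" loops in execTransformCuda.
    unsigned char* cB = shmem;
    const unsigned char* dB = reinterpret_cast<const unsigned char*>(devState);
    for (int e = threadIdx.x; e < (int)sizeof(FakeGenerator); e += blockDim.x)
        cB[e] = dB[e];
    __syncthreads();

    // Grid-stride loop over the output, element-wise-stride fast path only.
    long tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (long e = tid; e < length; e += (long)blockDim.x * gridDim.x)
        z[e * zEWS] = fakeOp(e, buffer);
}

Such a kernel would be launched with the dynamic shared-memory size set to at least sizeof(FakeGenerator), e.g. patternSketch<<<grid, block, sizeof(FakeGenerator), stream>>>(devState, z, length, 1);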
26b9d9d4e2ddb97c9b38321d05efdcf5d451345c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void sumGrad(float* output, float* input1, float* input2, float* input3, float* input4, const int numElem)
{
    // Each thread sums its contiguous slice [numElem*pos/size, numElem*(pos+1)/size) of the four inputs.
    size_t pos = blockDim.x * blockIdx.x + threadIdx.x;
    size_t size = blockDim.x * gridDim.x;
    for (int i = numElem * pos / size; i < numElem * (pos + 1) / size; i++) {
        output[i] = input1[i] + input2[i] + input3[i] + input4[i];
    }
}
26b9d9d4e2ddb97c9b38321d05efdcf5d451345c.cu
#include "includes.h" __global__ void sumGrad(float* output, float* input1, float* input2, float* input3, float* input4, const int numElem) { size_t pos = blockDim.x * blockIdx.x + threadIdx.x; size_t size = blockDim.x * gridDim.x; for(int i = numElem * pos / size; i < numElem * (pos+1) / size; i++){ output[i] = input1[i] + input2[i] + input3[i] + input4[i]; } }
8eac9bc16cb8e0a8f33d3dadab49247110b71248.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "nested_json.hpp" #include <io/utilities/parsing_utils.cuh> #include <io/utilities/type_inference.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/detail/utilities/visitor_overload.hpp> #include <cudf/io/detail/data_casting.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/span.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/count.h> #include <thrust/for_each.h> #include <thrust/functional.h> #include <thrust/gather.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/permutation_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/unique.h> #include <cuda/atomic> #include <algorithm> #include <cstdint> namespace cudf::io::json::detail { // DEBUG prints auto to_cat = [](auto v) -> std::string { switch (v) { case NC_STRUCT: return " S"; case NC_LIST: return " L"; case NC_STR: return " \""; case NC_VAL: return " V"; case NC_FN: return " F"; case NC_ERR: return "ER"; default: return "UN"; }; }; auto to_int = [](auto v) { return std::to_string(static_cast<int>(v)); }; auto print_vec = [](auto const& cpu, auto const name, auto converter) { for (auto const& v : cpu) printf("%3s,", converter(v).c_str()); std::cout << name << std::endl; }; void print_tree(host_span<SymbolT const> input, tree_meta_t const& d_gpu_tree, rmm::cuda_stream_view stream) { print_vec(cudf::detail::make_std_vector_async(d_gpu_tree.node_categories, stream), "node_categories", to_cat); print_vec(cudf::detail::make_std_vector_async(d_gpu_tree.parent_node_ids, stream), "parent_node_ids", to_int); print_vec( cudf::detail::make_std_vector_async(d_gpu_tree.node_levels, stream), "node_levels", to_int); auto node_range_begin = cudf::detail::make_std_vector_async(d_gpu_tree.node_range_begin, stream); auto node_range_end = cudf::detail::make_std_vector_async(d_gpu_tree.node_range_end, stream); print_vec(node_range_begin, "node_range_begin", to_int); print_vec(node_range_end, "node_range_end", to_int); for (int i = 0; i < int(node_range_begin.size()); i++) { printf("%3s ", std::string(input.data() + node_range_begin[i], node_range_end[i] - node_range_begin[i]) .c_str()); } printf(" (JSON)\n"); } /** * @brief Reduces node tree representation to column tree representation. 
* * @param tree Node tree representation of JSON string * @param original_col_ids Column ids of nodes * @param sorted_col_ids Sorted column ids of nodes * @param ordered_node_ids Node ids of nodes sorted by column ids * @param row_offsets Row offsets of nodes * @param is_array_of_arrays Whether the tree is an array of arrays * @param row_array_parent_col_id Column id of row array, if is_array_of_arrays is true * @param stream CUDA stream used for device memory operations and kernel launches * @return A tuple of column tree representation of JSON string, column ids of columns, and * max row offsets of columns */ std::tuple<tree_meta_t, rmm::device_uvector<NodeIndexT>, rmm::device_uvector<size_type>> reduce_to_column_tree(tree_meta_t& tree, device_span<NodeIndexT> original_col_ids, device_span<NodeIndexT> sorted_col_ids, device_span<NodeIndexT> ordered_node_ids, device_span<size_type> row_offsets, bool is_array_of_arrays, NodeIndexT const row_array_parent_col_id, rmm::cuda_stream_view stream) { CUDF_FUNC_RANGE(); // 1. column count for allocation auto const num_columns = thrust::unique_count(rmm::exec_policy(stream), sorted_col_ids.begin(), sorted_col_ids.end()); // 2. reduce_by_key {col_id}, {row_offset}, max. rmm::device_uvector<NodeIndexT> unique_col_ids(num_columns, stream); rmm::device_uvector<size_type> max_row_offsets(num_columns, stream); auto ordered_row_offsets = thrust::make_permutation_iterator(row_offsets.begin(), ordered_node_ids.begin()); thrust::reduce_by_key(rmm::exec_policy(stream), sorted_col_ids.begin(), sorted_col_ids.end(), ordered_row_offsets, unique_col_ids.begin(), max_row_offsets.begin(), thrust::equal_to<size_type>(), thrust::maximum<size_type>()); // 3. reduce_by_key {col_id}, {node_categories} - custom opp (*+v=*, v+v=v, *+#=E) rmm::device_uvector<NodeT> column_categories(num_columns, stream); thrust::reduce_by_key( rmm::exec_policy(stream), sorted_col_ids.begin(), sorted_col_ids.end(), thrust::make_permutation_iterator(tree.node_categories.begin(), ordered_node_ids.begin()), unique_col_ids.begin(), column_categories.begin(), thrust::equal_to<size_type>(), [] __device__(NodeT type_a, NodeT type_b) -> NodeT { auto is_a_leaf = (type_a == NC_VAL || type_a == NC_STR); auto is_b_leaf = (type_b == NC_VAL || type_b == NC_STR); // (v+v=v, *+*=*, *+v=*, *+#=E, NESTED+VAL=NESTED) // *+*=*, v+v=v if (type_a == type_b) { return type_a; } else if (is_a_leaf) { // *+v=*, N+V=N // STRUCT/LIST + STR/VAL = STRUCT/LIST, STR/VAL + FN = ERR, STR/VAL + STR = STR return type_b == NC_FN ? NC_ERR : (is_b_leaf ? NC_STR : type_b); } else if (is_b_leaf) { return type_a == NC_FN ? NC_ERR : (is_a_leaf ? NC_STR : type_a); } // *+#=E return NC_ERR; }); // 4. 
unique_copy parent_node_ids, ranges rmm::device_uvector<TreeDepthT> column_levels(0, stream); // not required rmm::device_uvector<NodeIndexT> parent_col_ids(num_columns, stream); rmm::device_uvector<SymbolOffsetT> col_range_begin(num_columns, stream); // Field names rmm::device_uvector<SymbolOffsetT> col_range_end(num_columns, stream); rmm::device_uvector<size_type> unique_node_ids(num_columns, stream); thrust::unique_by_key_copy(rmm::exec_policy(stream), sorted_col_ids.begin(), sorted_col_ids.end(), ordered_node_ids.begin(), thrust::make_discard_iterator(), unique_node_ids.begin()); thrust::copy_n( rmm::exec_policy(stream), thrust::make_zip_iterator( thrust::make_permutation_iterator(tree.parent_node_ids.begin(), unique_node_ids.begin()), thrust::make_permutation_iterator(tree.node_range_begin.begin(), unique_node_ids.begin()), thrust::make_permutation_iterator(tree.node_range_end.begin(), unique_node_ids.begin())), unique_node_ids.size(), thrust::make_zip_iterator( parent_col_ids.begin(), col_range_begin.begin(), col_range_end.begin())); // convert parent_node_ids to parent_col_ids thrust::transform( rmm::exec_policy(stream), parent_col_ids.begin(), parent_col_ids.end(), parent_col_ids.begin(), [col_ids = original_col_ids.begin()] __device__(auto parent_node_id) -> size_type { return parent_node_id == parent_node_sentinel ? parent_node_sentinel : col_ids[parent_node_id]; }); // condition is true if parent is not a list, or sentinel/root // Special case to return true if parent is a list and is_array_of_arrays is true auto is_non_list_parent = [column_categories = column_categories.begin(), is_array_of_arrays, row_array_parent_col_id] __device__(auto parent_col_id) -> bool { return !(parent_col_id == parent_node_sentinel || column_categories[parent_col_id] == NC_LIST && (!is_array_of_arrays || parent_col_id != row_array_parent_col_id)); }; // Mixed types in List children go to different columns, // so all immediate children of list column should have same max_row_offsets. // create list's children max_row_offsets array. (initialize to zero) // atomicMax on children max_row_offsets array. // gather the max_row_offsets from children row offset array. { rmm::device_uvector<NodeIndexT> list_parents_children_max_row_offsets(num_columns, stream); thrust::fill(rmm::exec_policy(stream), list_parents_children_max_row_offsets.begin(), list_parents_children_max_row_offsets.end(), 0); thrust::for_each(rmm::exec_policy(stream), unique_col_ids.begin(), unique_col_ids.end(), [column_categories = column_categories.begin(), parent_col_ids = parent_col_ids.begin(), max_row_offsets = max_row_offsets.begin(), list_parents_children_max_row_offsets = list_parents_children_max_row_offsets.begin()] __device__(auto col_id) { auto parent_col_id = parent_col_ids[col_id]; if (parent_col_id != parent_node_sentinel and column_categories[parent_col_id] == node_t::NC_LIST) { cuda::atomic_ref<NodeIndexT, cuda::thread_scope_device> ref{ *(list_parents_children_max_row_offsets + parent_col_id)}; ref.fetch_max(max_row_offsets[col_id], cuda::std::memory_order_relaxed); } }); thrust::gather_if( rmm::exec_policy(stream), parent_col_ids.begin(), parent_col_ids.end(), parent_col_ids.begin(), list_parents_children_max_row_offsets.begin(), max_row_offsets.begin(), [column_categories = column_categories.begin()] __device__(size_type parent_col_id) { return parent_col_id != parent_node_sentinel and column_categories[parent_col_id] == node_t::NC_LIST; }); } // copy lists' max_row_offsets to children. 
// all structs should have same size. thrust::transform_if( rmm::exec_policy(stream), unique_col_ids.begin(), unique_col_ids.end(), max_row_offsets.begin(), [column_categories = column_categories.begin(), is_non_list_parent, parent_col_ids = parent_col_ids.begin(), max_row_offsets = max_row_offsets.begin()] __device__(size_type col_id) { auto parent_col_id = parent_col_ids[col_id]; // condition is true if parent is not a list, or sentinel/root while (is_non_list_parent(parent_col_id)) { col_id = parent_col_id; parent_col_id = parent_col_ids[parent_col_id]; } return max_row_offsets[col_id]; }, [column_categories = column_categories.begin(), is_non_list_parent, parent_col_ids = parent_col_ids.begin()] __device__(size_type col_id) { auto parent_col_id = parent_col_ids[col_id]; // condition is true if parent is not a list, or sentinel/root return is_non_list_parent(parent_col_id); }); return std::tuple{tree_meta_t{std::move(column_categories), std::move(parent_col_ids), std::move(column_levels), std::move(col_range_begin), std::move(col_range_end)}, std::move(unique_col_ids), std::move(max_row_offsets)}; } /** * @brief Get the column indices for the values column for array of arrays rows * * @param row_array_children_level The level of the row array's children * @param d_tree The tree metadata * @param col_ids The column ids * @param num_columns The number of columns * @param stream The stream to use * @return The value columns' indices */ rmm::device_uvector<NodeIndexT> get_values_column_indices(TreeDepthT const row_array_children_level, tree_meta_t const& d_tree, device_span<NodeIndexT> col_ids, size_type const num_columns, rmm::cuda_stream_view stream) { CUDF_FUNC_RANGE(); auto [level2_nodes, level2_indices] = get_array_children_indices( row_array_children_level, d_tree.node_levels, d_tree.parent_node_ids, stream); auto col_id_location = thrust::make_permutation_iterator(col_ids.begin(), level2_nodes.begin()); rmm::device_uvector<NodeIndexT> values_column_indices(num_columns, stream); thrust::scatter(rmm::exec_policy(stream), level2_indices.begin(), level2_indices.end(), col_id_location, values_column_indices.begin()); return values_column_indices; } /** * @brief Copies strings specified by pair of begin, end offsets to host vector of strings. 
* * @param input String device buffer * @param node_range_begin Begin offset of the strings * @param node_range_end End offset of the strings * @param stream CUDA stream * @return Vector of strings */ std::vector<std::string> copy_strings_to_host(device_span<SymbolT const> input, device_span<SymbolOffsetT const> node_range_begin, device_span<SymbolOffsetT const> node_range_end, rmm::cuda_stream_view stream) { CUDF_FUNC_RANGE(); auto const num_strings = node_range_begin.size(); rmm::device_uvector<thrust::pair<char const*, size_type>> string_views(num_strings, stream); auto d_offset_pairs = thrust::make_zip_iterator(node_range_begin.begin(), node_range_end.begin()); thrust::transform(rmm::exec_policy(stream), d_offset_pairs, d_offset_pairs + num_strings, string_views.begin(), [data = input.data()] __device__(auto const& offsets) { // Note: first character for non-field columns return thrust::make_pair( data + thrust::get<0>(offsets), static_cast<size_type>(thrust::get<1>(offsets) - thrust::get<0>(offsets))); }); cudf::io::parse_options_view options_view{}; options_view.quotechar = '\0'; // no quotes options_view.keepquotes = true; auto d_column_names = parse_data(string_views.begin(), num_strings, data_type{type_id::STRING}, rmm::device_buffer{}, 0, options_view, stream, rmm::mr::get_current_device_resource()); auto to_host = [stream](auto const& col) { if (col.is_empty()) return std::vector<std::string>{}; auto const scv = cudf::strings_column_view(col); auto const h_chars = cudf::detail::make_std_vector_sync<char>( cudf::device_span<char const>(scv.chars().data<char>(), scv.chars().size()), stream); auto const h_offsets = cudf::detail::make_std_vector_sync( cudf::device_span<cudf::size_type const>(scv.offsets().data<cudf::size_type>() + scv.offset(), scv.size() + 1), stream); // build std::string vector from chars and offsets std::vector<std::string> host_data; host_data.reserve(col.size()); std::transform( std::begin(h_offsets), std::end(h_offsets) - 1, std::begin(h_offsets) + 1, std::back_inserter(host_data), [&](auto start, auto end) { return std::string(h_chars.data() + start, end - start); }); return host_data; }; return to_host(d_column_names->view()); } /** * @brief Holds member data pointers of `d_json_column` * */ struct json_column_data { using row_offset_t = json_column::row_offset_t; row_offset_t* string_offsets; row_offset_t* string_lengths; row_offset_t* child_offsets; bitmask_type* validity; }; /** * @brief Constructs `d_json_column` from node tree representation * Newly constructed columns are insert into `root`'s children. * `root` must be a list type. 
* * @param input Input JSON string device data * @param tree Node tree representation of the JSON string * @param col_ids Column ids of the nodes in the tree * @param row_offsets Row offsets of the nodes in the tree * @param root Root node of the `d_json_column` tree * @param is_array_of_arrays Whether the tree is an array of arrays * @param is_enabled_lines Whether the input is a line-delimited JSON * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the device memory * of child_offets and validity members of `d_json_column` */ void make_device_json_column(device_span<SymbolT const> input, tree_meta_t& tree, device_span<NodeIndexT> col_ids, device_span<size_type> row_offsets, device_json_column& root, bool is_array_of_arrays, bool is_enabled_lines, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); auto num_nodes = col_ids.size(); rmm::device_uvector<NodeIndexT> sorted_col_ids(col_ids.size(), stream); // make a copy thrust::copy(rmm::exec_policy(stream), col_ids.begin(), col_ids.end(), sorted_col_ids.begin()); // sort by {col_id} on {node_ids} stable rmm::device_uvector<NodeIndexT> node_ids(col_ids.size(), stream); thrust::sequence(rmm::exec_policy(stream), node_ids.begin(), node_ids.end()); thrust::stable_sort_by_key( rmm::exec_policy(stream), sorted_col_ids.begin(), sorted_col_ids.end(), node_ids.begin()); NodeIndexT const row_array_parent_col_id = [&]() { if (!is_array_of_arrays) return parent_node_sentinel; auto const list_node_index = is_enabled_lines ? 0 : 1; NodeIndexT value; CUDF_CUDA_TRY(hipMemcpyAsync(&value, col_ids.data() + list_node_index, sizeof(NodeIndexT), hipMemcpyDefault, stream.value())); stream.synchronize(); return value; }(); // 1. gather column information. auto [d_column_tree, d_unique_col_ids, d_max_row_offsets] = reduce_to_column_tree(tree, col_ids, sorted_col_ids, node_ids, row_offsets, is_array_of_arrays, row_array_parent_col_id, stream); auto num_columns = d_unique_col_ids.size(); auto unique_col_ids = cudf::detail::make_std_vector_async(d_unique_col_ids, stream); auto column_categories = cudf::detail::make_std_vector_async(d_column_tree.node_categories, stream); auto column_parent_ids = cudf::detail::make_std_vector_async(d_column_tree.parent_node_ids, stream); auto column_range_beg = cudf::detail::make_std_vector_async(d_column_tree.node_range_begin, stream); auto max_row_offsets = cudf::detail::make_std_vector_async(d_max_row_offsets, stream); std::vector<std::string> column_names = copy_strings_to_host( input, d_column_tree.node_range_begin, d_column_tree.node_range_end, stream); // array of arrays column names if (is_array_of_arrays) { TreeDepthT const row_array_children_level = is_enabled_lines ? 1 : 2; auto values_column_indices = get_values_column_indices(row_array_children_level, tree, col_ids, num_columns, stream); auto h_values_column_indices = cudf::detail::make_std_vector_async(values_column_indices, stream); std::transform(unique_col_ids.begin(), unique_col_ids.end(), column_names.begin(), column_names.begin(), [&h_values_column_indices, &column_parent_ids, row_array_parent_col_id]( auto col_id, auto name) mutable { return column_parent_ids[col_id] == row_array_parent_col_id ? 
std::to_string(h_values_column_indices[col_id]) : name; }); } auto to_json_col_type = [](auto category) { switch (category) { case NC_STRUCT: return json_col_t::StructColumn; case NC_LIST: return json_col_t::ListColumn; case NC_STR: [[fallthrough]]; case NC_VAL: return json_col_t::StringColumn; default: return json_col_t::Unknown; } }; auto init_to_zero = [stream](auto& v) { thrust::uninitialized_fill(rmm::exec_policy(stream), v.begin(), v.end(), 0); }; auto initialize_json_columns = [&](auto i, auto& col) { if (column_categories[i] == NC_ERR || column_categories[i] == NC_FN) { return; } else if (column_categories[i] == NC_VAL || column_categories[i] == NC_STR) { col.string_offsets.resize(max_row_offsets[i] + 1, stream); col.string_lengths.resize(max_row_offsets[i] + 1, stream); init_to_zero(col.string_offsets); init_to_zero(col.string_lengths); } else if (column_categories[i] == NC_LIST) { col.child_offsets.resize(max_row_offsets[i] + 2, stream); init_to_zero(col.child_offsets); } col.num_rows = max_row_offsets[i] + 1; col.validity = cudf::detail::create_null_mask(col.num_rows, cudf::mask_state::ALL_NULL, stream, mr); col.type = to_json_col_type(column_categories[i]); }; // 2. generate nested columns tree and its device_memory // reorder unique_col_ids w.r.t. column_range_begin for order of column to be in field order. auto h_range_col_id_it = thrust::make_zip_iterator(column_range_beg.begin(), unique_col_ids.begin()); std::sort(h_range_col_id_it, h_range_col_id_it + num_columns, [](auto const& a, auto const& b) { return thrust::get<0>(a) < thrust::get<0>(b); }); // use hash map because we may skip field name's col_ids std::unordered_map<NodeIndexT, std::reference_wrapper<device_json_column>> columns; // map{parent_col_id, child_col_name}> = child_col_id, used for null value column tracking std::map<std::pair<NodeIndexT, std::string>, NodeIndexT> mapped_columns; // find column_ids which are values, but should be ignored in validity std::vector<uint8_t> ignore_vals(num_columns, 0); columns.try_emplace(parent_node_sentinel, std::ref(root)); for (auto const this_col_id : unique_col_ids) { if (column_categories[this_col_id] == NC_ERR || column_categories[this_col_id] == NC_FN) { continue; } // Struct, List, String, Value std::string name = ""; auto parent_col_id = column_parent_ids[this_col_id]; if (parent_col_id == parent_node_sentinel || column_categories[parent_col_id] == NC_LIST) { if (is_array_of_arrays && parent_col_id == row_array_parent_col_id) { name = column_names[this_col_id]; } else { name = list_child_name; } } else if (column_categories[parent_col_id] == NC_FN) { auto field_name_col_id = parent_col_id; parent_col_id = column_parent_ids[parent_col_id]; name = column_names[field_name_col_id]; } else { CUDF_FAIL("Unexpected parent column category"); } // If the child is already found, // replace if this column is a nested column and the existing was a value column // ignore this column if this column is a value column and the existing was a nested column auto it = columns.find(parent_col_id); CUDF_EXPECTS(it != columns.end(), "Parent column not found"); auto& parent_col = it->second.get(); bool replaced = false; if (mapped_columns.count({parent_col_id, name}) > 0) { if (column_categories[this_col_id] == NC_VAL || column_categories[this_col_id] == NC_STR) { ignore_vals[this_col_id] = 1; continue; } auto old_col_id = mapped_columns[{parent_col_id, name}]; if (column_categories[old_col_id] == NC_VAL || column_categories[old_col_id] == NC_STR) { // remap ignore_vals[old_col_id] = 1; 
mapped_columns.erase({parent_col_id, name}); columns.erase(old_col_id); parent_col.child_columns.erase(name); replaced = true; // to skip duplicate name in column_order } else { // If this is a nested column but we're trying to insert either (a) a list node into a // struct column or (b) a struct node into a list column, we fail CUDF_EXPECTS(not((column_categories[old_col_id] == NC_LIST and column_categories[this_col_id] == NC_STRUCT) or (column_categories[old_col_id] == NC_STRUCT and column_categories[this_col_id] == NC_LIST)), "A mix of lists and structs within the same column is not supported"); } } CUDF_EXPECTS(parent_col.child_columns.count(name) == 0, "duplicate column name: " + name); // move into parent device_json_column col(stream, mr); initialize_json_columns(this_col_id, col); auto inserted = parent_col.child_columns.try_emplace(name, std::move(col)).second; CUDF_EXPECTS(inserted, "child column insertion failed, duplicate column name in the parent"); if (not replaced) parent_col.column_order.push_back(name); columns.try_emplace(this_col_id, std::ref(parent_col.child_columns.at(name))); mapped_columns.try_emplace(std::make_pair(parent_col_id, name), this_col_id); } // restore unique_col_ids order std::sort(h_range_col_id_it, h_range_col_id_it + num_columns, [](auto const& a, auto const& b) { return thrust::get<1>(a) < thrust::get<1>(b); }); // move columns data to device. std::vector<json_column_data> columns_data(num_columns); for (auto& [col_id, col_ref] : columns) { if (col_id == parent_node_sentinel) continue; auto& col = col_ref.get(); columns_data[col_id] = json_column_data{col.string_offsets.data(), col.string_lengths.data(), col.child_offsets.data(), static_cast<bitmask_type*>(col.validity.data())}; } auto d_ignore_vals = cudf::detail::make_device_uvector_async( ignore_vals, stream, rmm::mr::get_current_device_resource()); auto d_columns_data = cudf::detail::make_device_uvector_async( columns_data, stream, rmm::mr::get_current_device_resource()); // 3. scatter string offsets to respective columns, set validity bits thrust::for_each_n( rmm::exec_policy(stream), thrust::counting_iterator<size_type>(0), num_nodes, [node_categories = tree.node_categories.begin(), col_ids = col_ids.begin(), row_offsets = row_offsets.begin(), range_begin = tree.node_range_begin.begin(), range_end = tree.node_range_end.begin(), d_ignore_vals = d_ignore_vals.begin(), d_columns_data = d_columns_data.begin()] __device__(size_type i) { switch (node_categories[i]) { case NC_STRUCT: set_bit(d_columns_data[col_ids[i]].validity, row_offsets[i]); break; case NC_LIST: set_bit(d_columns_data[col_ids[i]].validity, row_offsets[i]); break; case NC_STR: [[fallthrough]]; case NC_VAL: if (d_ignore_vals[col_ids[i]]) break; set_bit(d_columns_data[col_ids[i]].validity, row_offsets[i]); d_columns_data[col_ids[i]].string_offsets[row_offsets[i]] = range_begin[i]; d_columns_data[col_ids[i]].string_lengths[row_offsets[i]] = range_end[i] - range_begin[i]; break; default: break; } }); // 4. scatter List offset // copy_if only node's whose parent is list, (node_id, parent_col_id) // stable_sort by parent_col_id of {node_id}. // For all unique parent_node_id of (i==0, i-1!=i), write start offset. // (i==last, i+1!=i), write end offset. 
// unique_copy_by_key {parent_node_id} {row_offset} to // col[parent_col_id].child_offsets[row_offset[parent_node_id]] auto& parent_col_ids = sorted_col_ids; // reuse sorted_col_ids auto parent_col_id = thrust::make_transform_iterator( thrust::make_counting_iterator<size_type>(0), [col_ids = col_ids.begin(), parent_node_ids = tree.parent_node_ids.begin()] __device__(size_type node_id) { return parent_node_ids[node_id] == parent_node_sentinel ? parent_node_sentinel : col_ids[parent_node_ids[node_id]]; }); auto const list_children_end = thrust::copy_if( rmm::exec_policy(stream), thrust::make_zip_iterator(thrust::make_counting_iterator<size_type>(0), parent_col_id), thrust::make_zip_iterator(thrust::make_counting_iterator<size_type>(0), parent_col_id) + num_nodes, thrust::make_counting_iterator<size_type>(0), thrust::make_zip_iterator(node_ids.begin(), parent_col_ids.begin()), [node_categories = tree.node_categories.begin(), parent_node_ids = tree.parent_node_ids.begin()] __device__(size_type node_id) { auto parent_node_id = parent_node_ids[node_id]; return parent_node_id != parent_node_sentinel and node_categories[parent_node_id] == NC_LIST; }); auto const num_list_children = list_children_end - thrust::make_zip_iterator(node_ids.begin(), parent_col_ids.begin()); thrust::stable_sort_by_key(rmm::exec_policy(stream), parent_col_ids.begin(), parent_col_ids.begin() + num_list_children, node_ids.begin()); thrust::for_each_n( rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), num_list_children, [node_ids = node_ids.begin(), parent_node_ids = tree.parent_node_ids.begin(), parent_col_ids = parent_col_ids.begin(), row_offsets = row_offsets.begin(), d_columns_data = d_columns_data.begin(), num_list_children] __device__(size_type i) { auto const node_id = node_ids[i]; auto const parent_node_id = parent_node_ids[node_id]; // scatter to list_offset if (i == 0 or parent_node_ids[node_ids[i - 1]] != parent_node_id) { d_columns_data[parent_col_ids[i]].child_offsets[row_offsets[parent_node_id]] = row_offsets[node_id]; } // last value of list child_offset is its size. if (i == num_list_children - 1 or parent_node_ids[node_ids[i + 1]] != parent_node_id) { d_columns_data[parent_col_ids[i]].child_offsets[row_offsets[parent_node_id] + 1] = row_offsets[node_id] + 1; } }); // 5. scan on offsets. 
for (auto& [id, col_ref] : columns) { auto& col = col_ref.get(); if (col.type == json_col_t::StringColumn) { thrust::inclusive_scan(rmm::exec_policy(stream), col.string_offsets.begin(), col.string_offsets.end(), col.string_offsets.begin(), thrust::maximum<json_column::row_offset_t>{}); } else if (col.type == json_col_t::ListColumn) { thrust::inclusive_scan(rmm::exec_policy(stream), col.child_offsets.begin(), col.child_offsets.end(), col.child_offsets.begin(), thrust::maximum<json_column::row_offset_t>{}); } } } /** * @brief Retrieves the parse_options to be used for type inference and type casting * * @param options The reader options to influence the relevant type inference and type casting * options */ cudf::io::parse_options parsing_options(cudf::io::json_reader_options const& options, rmm::cuda_stream_view stream); std::pair<std::unique_ptr<column>, std::vector<column_name_info>> device_json_column_to_cudf_column( device_json_column& json_col, device_span<SymbolT const> d_input, cudf::io::parse_options const& options, std::optional<schema_element> schema, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); auto validity_size_check = [](device_json_column& json_col) { CUDF_EXPECTS(json_col.validity.size() >= bitmask_allocation_size_bytes(json_col.num_rows), "valid_count is too small"); }; auto make_validity = [stream, validity_size_check]( device_json_column& json_col) -> std::pair<rmm::device_buffer, size_type> { validity_size_check(json_col); auto null_count = cudf::detail::null_count( static_cast<bitmask_type*>(json_col.validity.data()), 0, json_col.num_rows, stream); // full null_mask is always required for parse_data return {std::move(json_col.validity), null_count}; // Note: json_col modified here, moves this memory }; auto get_child_schema = [schema](auto child_name) -> std::optional<schema_element> { if (schema.has_value()) { auto const result = schema.value().child_types.find(child_name); if (result != std::end(schema.value().child_types)) { return result->second; } } return {}; }; switch (json_col.type) { case json_col_t::StringColumn: { // move string_offsets to GPU and transform to string column auto const col_size = json_col.string_offsets.size(); using char_length_pair_t = thrust::pair<char const*, size_type>; CUDF_EXPECTS(json_col.string_offsets.size() == json_col.string_lengths.size(), "string offset, string length mismatch"); rmm::device_uvector<char_length_pair_t> d_string_data(col_size, stream); // TODO how about directly storing pair<char*, size_t> in json_column? auto offset_length_it = thrust::make_zip_iterator(json_col.string_offsets.begin(), json_col.string_lengths.begin()); // Prepare iterator that returns (string_offset, string_length)-pairs needed by inference auto string_ranges_it = thrust::make_transform_iterator(offset_length_it, [] __device__(auto ip) { return thrust::pair<json_column::row_offset_t, std::size_t>{ thrust::get<0>(ip), static_cast<std::size_t>(thrust::get<1>(ip))}; }); // Prepare iterator that returns (string_ptr, string_length)-pairs needed by type conversion auto string_spans_it = thrust::make_transform_iterator( offset_length_it, [data = d_input.data()] __device__(auto ip) { return thrust::pair<char const*, std::size_t>{ data + thrust::get<0>(ip), static_cast<std::size_t>(thrust::get<1>(ip))}; }); data_type target_type{}; if (schema.has_value()) { #ifdef NJP_DEBUG_PRINT std::cout << "-> explicit type: " << (schema.has_value() ? 
std::to_string(static_cast<int>(schema->type.id())) : "n/a"); #endif target_type = schema.value().type; } // Infer column type, if we don't have an explicit type for it else { target_type = cudf::io::detail::infer_data_type( options.json_view(), d_input, string_ranges_it, col_size, stream); } auto [result_bitmask, null_count] = make_validity(json_col); // Convert strings to the inferred data type auto col = parse_data(string_spans_it, col_size, target_type, std::move(result_bitmask), null_count, options.view(), stream, mr); // Reset nullable if we do not have nulls // This is to match the existing JSON reader's behaviour: // - Non-string columns will always be returned as nullable // - String columns will be returned as nullable, iff there's at least one null entry if (target_type.id() == type_id::STRING and col->null_count() == 0) { col->set_null_mask(rmm::device_buffer{0, stream, mr}, 0); } // For string columns return ["offsets", "char"] schema if (target_type.id() == type_id::STRING) { return {std::move(col), std::vector<column_name_info>{{"offsets"}, {"chars"}}}; } // Non-string leaf-columns (e.g., numeric) do not have child columns in the schema return {std::move(col), std::vector<column_name_info>{}}; } case json_col_t::StructColumn: { std::vector<std::unique_ptr<column>> child_columns; std::vector<column_name_info> column_names{}; size_type num_rows{json_col.num_rows}; // Create children columns for (auto const& col_name : json_col.column_order) { auto const& col = json_col.child_columns.find(col_name); column_names.emplace_back(col->first); auto& child_col = col->second; auto [child_column, names] = device_json_column_to_cudf_column( child_col, d_input, options, get_child_schema(col_name), stream, mr); CUDF_EXPECTS(num_rows == child_column->size(), "All children columns must have the same size"); child_columns.push_back(std::move(child_column)); column_names.back().children = names; } auto [result_bitmask, null_count] = make_validity(json_col); // The null_mask is set after creation of struct column is to skip the superimpose_nulls and // null validation applied in make_structs_column factory, which is not needed for json auto ret_col = make_structs_column(num_rows, std::move(child_columns), 0, {}, stream, mr); ret_col->set_null_mask(std::move(result_bitmask), null_count); return {std::move(ret_col), column_names}; } case json_col_t::ListColumn: { size_type num_rows = json_col.child_offsets.size() - 1; std::vector<column_name_info> column_names{}; column_names.emplace_back("offsets"); column_names.emplace_back( json_col.child_columns.empty() ? list_child_name : json_col.child_columns.begin()->first); // Note: json_col modified here, reuse the memory auto offsets_column = std::make_unique<column>(data_type{type_id::INT32}, num_rows + 1, json_col.child_offsets.release(), rmm::device_buffer{}, 0); // Create children column auto [child_column, names] = json_col.child_columns.empty() ? std::pair<std::unique_ptr<column>, // EMPTY type could not used because gather throws exception on EMPTY type. 
std::vector<column_name_info>>{std::make_unique<column>( data_type{type_id::INT8}, 0, rmm::device_buffer{}, rmm::device_buffer{}, 0), std::vector<column_name_info>{}} : device_json_column_to_cudf_column( json_col.child_columns.begin()->second, d_input, options, get_child_schema(json_col.child_columns.begin()->first), stream, mr); column_names.back().children = names; auto [result_bitmask, null_count] = make_validity(json_col); auto ret_col = make_lists_column(num_rows, std::move(offsets_column), std::move(child_column), 0, rmm::device_buffer{0, stream, mr}, stream, mr); // The null_mask is set after creation of list column is to skip the purge_nonempty_nulls and // null validation applied in make_lists_column factory, which is not needed for json // parent column cannot be null when its children is non-empty in JSON ret_col->set_null_mask(std::move(result_bitmask), null_count); return {std::move(ret_col), std::move(column_names)}; } default: CUDF_FAIL("Unsupported column type"); break; } } table_with_metadata device_parse_nested_json(device_span<SymbolT const> d_input, cudf::io::json_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); auto gpu_tree = [&]() { // Parse the JSON and get the token stream const auto [tokens_gpu, token_indices_gpu] = get_token_stream(d_input, options, stream, rmm::mr::get_current_device_resource()); // gpu tree generation return get_tree_representation( tokens_gpu, token_indices_gpu, stream, rmm::mr::get_current_device_resource()); }(); // IILE used to free memory of token data. #ifdef NJP_DEBUG_PRINT auto h_input = cudf::detail::make_host_vector_async(d_input, stream); print_tree(h_input, gpu_tree, stream); #endif bool const is_array_of_arrays = [&]() { std::array<node_t, 2> h_node_categories = {NC_ERR, NC_ERR}; auto const size_to_copy = ::min(size_t{2}, gpu_tree.node_categories.size()); CUDF_CUDA_TRY(hipMemcpyAsync(h_node_categories.data(), gpu_tree.node_categories.data(), sizeof(node_t) * size_to_copy, hipMemcpyDefault, stream.value())); stream.synchronize(); if (options.is_enabled_lines()) return h_node_categories[0] == NC_LIST; return h_node_categories[0] == NC_LIST and h_node_categories[1] == NC_LIST; }(); auto [gpu_col_id, gpu_row_offsets] = records_orient_tree_traversal(d_input, gpu_tree, is_array_of_arrays, options.is_enabled_lines(), stream, rmm::mr::get_current_device_resource()); device_json_column root_column(stream, mr); root_column.type = json_col_t::ListColumn; root_column.child_offsets.resize(2, stream); thrust::fill(rmm::exec_policy(stream), root_column.child_offsets.begin(), root_column.child_offsets.end(), 0); // Get internal JSON column make_device_json_column(d_input, gpu_tree, gpu_col_id, gpu_row_offsets, root_column, is_array_of_arrays, options.is_enabled_lines(), stream, mr); // data_root refers to the root column of the data represented by the given JSON string auto& data_root = options.is_enabled_lines() ? 
root_column : root_column.child_columns.begin()->second; // Zero row entries if (data_root.type == json_col_t::ListColumn && data_root.child_columns.empty()) { return table_with_metadata{std::make_unique<table>(std::vector<std::unique_ptr<column>>{})}; } // Verify that we were in fact given a list of structs (or in JSON speech: an array of objects) auto constexpr single_child_col_count = 1; CUDF_EXPECTS(data_root.type == json_col_t::ListColumn and data_root.child_columns.size() == single_child_col_count and data_root.child_columns.begin()->second.type == (is_array_of_arrays ? json_col_t::ListColumn : json_col_t::StructColumn), "Input needs to be an array of arrays or an array of (nested) objects"); // Slice off the root list column, which has only a single row that contains all the structs auto& root_struct_col = data_root.child_columns.begin()->second; // Initialize meta data to be populated while recursing through the tree of columns std::vector<std::unique_ptr<column>> out_columns; std::vector<column_name_info> out_column_names; auto parse_opt = parsing_options(options, stream); // Iterate over the struct's child columns and convert to cudf column size_type column_index = 0; for (auto const& col_name : root_struct_col.column_order) { auto& json_col = root_struct_col.child_columns.find(col_name)->second; // Insert this columns name into the schema out_column_names.emplace_back(col_name); std::optional<schema_element> child_schema_element = std::visit( cudf::detail::visitor_overload{ [column_index](std::vector<data_type> const& user_dtypes) -> std::optional<schema_element> { return (static_cast<std::size_t>(column_index) < user_dtypes.size()) ? std::optional<schema_element>{{user_dtypes[column_index]}} : std::optional<schema_element>{}; }, [col_name]( std::map<std::string, data_type> const& user_dtypes) -> std::optional<schema_element> { return (user_dtypes.find(col_name) != std::end(user_dtypes)) ? std::optional<schema_element>{{user_dtypes.find(col_name)->second}} : std::optional<schema_element>{}; }, [col_name](std::map<std::string, schema_element> const& user_dtypes) -> std::optional<schema_element> { return (user_dtypes.find(col_name) != std::end(user_dtypes)) ? user_dtypes.find(col_name)->second : std::optional<schema_element>{}; }}, options.get_dtypes()); #ifdef NJP_DEBUG_PRINT auto debug_schema_print = [](auto ret) { std::cout << ", type id: " << (ret.has_value() ? std::to_string(static_cast<int>(ret->type.id())) : "n/a") << ", with " << (ret.has_value() ? 
ret->child_types.size() : 0) << " children" << "\n"; }; std::visit( cudf::detail::visitor_overload{[column_index](std::vector<data_type> const&) { std::cout << "Column by index: #" << column_index; }, [col_name](std::map<std::string, data_type> const&) { std::cout << "Column by flat name: '" << col_name; }, [col_name](std::map<std::string, schema_element> const&) { std::cout << "Column by nested name: #" << col_name; }}, options.get_dtypes()); debug_schema_print(child_schema_element); #endif // Get this JSON column's cudf column and schema info, (modifies json_col) auto [cudf_col, col_name_info] = device_json_column_to_cudf_column( json_col, d_input, parse_opt, child_schema_element, stream, mr); // TODO: RangeIndex as DataFrame.columns names for array of arrays // if (is_array_of_arrays) { // col_name_info.back().name = ""; // } out_column_names.back().children = std::move(col_name_info); out_columns.emplace_back(std::move(cudf_col)); column_index++; } return table_with_metadata{std::make_unique<table>(std::move(out_columns)), {out_column_names}}; } } // namespace cudf::io::json::detail
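// ---------------------------------------------------------------------------
// Illustrative sketch (separate from the paired files above): step 5 of
// make_device_json_column runs an inclusive scan with thrust::maximum<> over
// string_offsets / child_offsets so that rows that never received a scattered
// value (and were left at 0) inherit the last valid offset. A minimal
// host-side analogue of that max-scan idiom, using std::inclusive_scan in
// place of the thrust device path:
#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
  // Offsets were scattered only for rows that actually had an entry;
  // untouched rows are still 0, as after uninitialized_fill + scatter.
  std::vector<int> offsets{0, 5, 0, 0, 12, 0, 20};
  std::inclusive_scan(offsets.begin(), offsets.end(), offsets.begin(),
                      [](int a, int b) { return std::max(a, b); });
  for (int v : offsets) std::printf("%d ", v);  // prints: 0 5 5 5 12 12 20
  std::printf("\n");
  return 0;
}
// ---------------------------------------------------------------------------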
8eac9bc16cb8e0a8f33d3dadab49247110b71248.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "nested_json.hpp" #include <io/utilities/parsing_utils.cuh> #include <io/utilities/type_inference.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/detail/utilities/visitor_overload.hpp> #include <cudf/io/detail/data_casting.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/span.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/count.h> #include <thrust/for_each.h> #include <thrust/functional.h> #include <thrust/gather.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/permutation_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/unique.h> #include <cuda/atomic> #include <algorithm> #include <cstdint> namespace cudf::io::json::detail { // DEBUG prints auto to_cat = [](auto v) -> std::string { switch (v) { case NC_STRUCT: return " S"; case NC_LIST: return " L"; case NC_STR: return " \""; case NC_VAL: return " V"; case NC_FN: return " F"; case NC_ERR: return "ER"; default: return "UN"; }; }; auto to_int = [](auto v) { return std::to_string(static_cast<int>(v)); }; auto print_vec = [](auto const& cpu, auto const name, auto converter) { for (auto const& v : cpu) printf("%3s,", converter(v).c_str()); std::cout << name << std::endl; }; void print_tree(host_span<SymbolT const> input, tree_meta_t const& d_gpu_tree, rmm::cuda_stream_view stream) { print_vec(cudf::detail::make_std_vector_async(d_gpu_tree.node_categories, stream), "node_categories", to_cat); print_vec(cudf::detail::make_std_vector_async(d_gpu_tree.parent_node_ids, stream), "parent_node_ids", to_int); print_vec( cudf::detail::make_std_vector_async(d_gpu_tree.node_levels, stream), "node_levels", to_int); auto node_range_begin = cudf::detail::make_std_vector_async(d_gpu_tree.node_range_begin, stream); auto node_range_end = cudf::detail::make_std_vector_async(d_gpu_tree.node_range_end, stream); print_vec(node_range_begin, "node_range_begin", to_int); print_vec(node_range_end, "node_range_end", to_int); for (int i = 0; i < int(node_range_begin.size()); i++) { printf("%3s ", std::string(input.data() + node_range_begin[i], node_range_end[i] - node_range_begin[i]) .c_str()); } printf(" (JSON)\n"); } /** * @brief Reduces node tree representation to column tree representation. 
* * @param tree Node tree representation of JSON string * @param original_col_ids Column ids of nodes * @param sorted_col_ids Sorted column ids of nodes * @param ordered_node_ids Node ids of nodes sorted by column ids * @param row_offsets Row offsets of nodes * @param is_array_of_arrays Whether the tree is an array of arrays * @param row_array_parent_col_id Column id of row array, if is_array_of_arrays is true * @param stream CUDA stream used for device memory operations and kernel launches * @return A tuple of column tree representation of JSON string, column ids of columns, and * max row offsets of columns */ std::tuple<tree_meta_t, rmm::device_uvector<NodeIndexT>, rmm::device_uvector<size_type>> reduce_to_column_tree(tree_meta_t& tree, device_span<NodeIndexT> original_col_ids, device_span<NodeIndexT> sorted_col_ids, device_span<NodeIndexT> ordered_node_ids, device_span<size_type> row_offsets, bool is_array_of_arrays, NodeIndexT const row_array_parent_col_id, rmm::cuda_stream_view stream) { CUDF_FUNC_RANGE(); // 1. column count for allocation auto const num_columns = thrust::unique_count(rmm::exec_policy(stream), sorted_col_ids.begin(), sorted_col_ids.end()); // 2. reduce_by_key {col_id}, {row_offset}, max. rmm::device_uvector<NodeIndexT> unique_col_ids(num_columns, stream); rmm::device_uvector<size_type> max_row_offsets(num_columns, stream); auto ordered_row_offsets = thrust::make_permutation_iterator(row_offsets.begin(), ordered_node_ids.begin()); thrust::reduce_by_key(rmm::exec_policy(stream), sorted_col_ids.begin(), sorted_col_ids.end(), ordered_row_offsets, unique_col_ids.begin(), max_row_offsets.begin(), thrust::equal_to<size_type>(), thrust::maximum<size_type>()); // 3. reduce_by_key {col_id}, {node_categories} - custom opp (*+v=*, v+v=v, *+#=E) rmm::device_uvector<NodeT> column_categories(num_columns, stream); thrust::reduce_by_key( rmm::exec_policy(stream), sorted_col_ids.begin(), sorted_col_ids.end(), thrust::make_permutation_iterator(tree.node_categories.begin(), ordered_node_ids.begin()), unique_col_ids.begin(), column_categories.begin(), thrust::equal_to<size_type>(), [] __device__(NodeT type_a, NodeT type_b) -> NodeT { auto is_a_leaf = (type_a == NC_VAL || type_a == NC_STR); auto is_b_leaf = (type_b == NC_VAL || type_b == NC_STR); // (v+v=v, *+*=*, *+v=*, *+#=E, NESTED+VAL=NESTED) // *+*=*, v+v=v if (type_a == type_b) { return type_a; } else if (is_a_leaf) { // *+v=*, N+V=N // STRUCT/LIST + STR/VAL = STRUCT/LIST, STR/VAL + FN = ERR, STR/VAL + STR = STR return type_b == NC_FN ? NC_ERR : (is_b_leaf ? NC_STR : type_b); } else if (is_b_leaf) { return type_a == NC_FN ? NC_ERR : (is_a_leaf ? NC_STR : type_a); } // *+#=E return NC_ERR; }); // 4. 
unique_copy parent_node_ids, ranges rmm::device_uvector<TreeDepthT> column_levels(0, stream); // not required rmm::device_uvector<NodeIndexT> parent_col_ids(num_columns, stream); rmm::device_uvector<SymbolOffsetT> col_range_begin(num_columns, stream); // Field names rmm::device_uvector<SymbolOffsetT> col_range_end(num_columns, stream); rmm::device_uvector<size_type> unique_node_ids(num_columns, stream); thrust::unique_by_key_copy(rmm::exec_policy(stream), sorted_col_ids.begin(), sorted_col_ids.end(), ordered_node_ids.begin(), thrust::make_discard_iterator(), unique_node_ids.begin()); thrust::copy_n( rmm::exec_policy(stream), thrust::make_zip_iterator( thrust::make_permutation_iterator(tree.parent_node_ids.begin(), unique_node_ids.begin()), thrust::make_permutation_iterator(tree.node_range_begin.begin(), unique_node_ids.begin()), thrust::make_permutation_iterator(tree.node_range_end.begin(), unique_node_ids.begin())), unique_node_ids.size(), thrust::make_zip_iterator( parent_col_ids.begin(), col_range_begin.begin(), col_range_end.begin())); // convert parent_node_ids to parent_col_ids thrust::transform( rmm::exec_policy(stream), parent_col_ids.begin(), parent_col_ids.end(), parent_col_ids.begin(), [col_ids = original_col_ids.begin()] __device__(auto parent_node_id) -> size_type { return parent_node_id == parent_node_sentinel ? parent_node_sentinel : col_ids[parent_node_id]; }); // condition is true if parent is not a list, or sentinel/root // Special case to return true if parent is a list and is_array_of_arrays is true auto is_non_list_parent = [column_categories = column_categories.begin(), is_array_of_arrays, row_array_parent_col_id] __device__(auto parent_col_id) -> bool { return !(parent_col_id == parent_node_sentinel || column_categories[parent_col_id] == NC_LIST && (!is_array_of_arrays || parent_col_id != row_array_parent_col_id)); }; // Mixed types in List children go to different columns, // so all immediate children of list column should have same max_row_offsets. // create list's children max_row_offsets array. (initialize to zero) // atomicMax on children max_row_offsets array. // gather the max_row_offsets from children row offset array. { rmm::device_uvector<NodeIndexT> list_parents_children_max_row_offsets(num_columns, stream); thrust::fill(rmm::exec_policy(stream), list_parents_children_max_row_offsets.begin(), list_parents_children_max_row_offsets.end(), 0); thrust::for_each(rmm::exec_policy(stream), unique_col_ids.begin(), unique_col_ids.end(), [column_categories = column_categories.begin(), parent_col_ids = parent_col_ids.begin(), max_row_offsets = max_row_offsets.begin(), list_parents_children_max_row_offsets = list_parents_children_max_row_offsets.begin()] __device__(auto col_id) { auto parent_col_id = parent_col_ids[col_id]; if (parent_col_id != parent_node_sentinel and column_categories[parent_col_id] == node_t::NC_LIST) { cuda::atomic_ref<NodeIndexT, cuda::thread_scope_device> ref{ *(list_parents_children_max_row_offsets + parent_col_id)}; ref.fetch_max(max_row_offsets[col_id], cuda::std::memory_order_relaxed); } }); thrust::gather_if( rmm::exec_policy(stream), parent_col_ids.begin(), parent_col_ids.end(), parent_col_ids.begin(), list_parents_children_max_row_offsets.begin(), max_row_offsets.begin(), [column_categories = column_categories.begin()] __device__(size_type parent_col_id) { return parent_col_id != parent_node_sentinel and column_categories[parent_col_id] == node_t::NC_LIST; }); } // copy lists' max_row_offsets to children. 
// all structs should have same size. thrust::transform_if( rmm::exec_policy(stream), unique_col_ids.begin(), unique_col_ids.end(), max_row_offsets.begin(), [column_categories = column_categories.begin(), is_non_list_parent, parent_col_ids = parent_col_ids.begin(), max_row_offsets = max_row_offsets.begin()] __device__(size_type col_id) { auto parent_col_id = parent_col_ids[col_id]; // condition is true if parent is not a list, or sentinel/root while (is_non_list_parent(parent_col_id)) { col_id = parent_col_id; parent_col_id = parent_col_ids[parent_col_id]; } return max_row_offsets[col_id]; }, [column_categories = column_categories.begin(), is_non_list_parent, parent_col_ids = parent_col_ids.begin()] __device__(size_type col_id) { auto parent_col_id = parent_col_ids[col_id]; // condition is true if parent is not a list, or sentinel/root return is_non_list_parent(parent_col_id); }); return std::tuple{tree_meta_t{std::move(column_categories), std::move(parent_col_ids), std::move(column_levels), std::move(col_range_begin), std::move(col_range_end)}, std::move(unique_col_ids), std::move(max_row_offsets)}; } /** * @brief Get the column indices for the values column for array of arrays rows * * @param row_array_children_level The level of the row array's children * @param d_tree The tree metadata * @param col_ids The column ids * @param num_columns The number of columns * @param stream The stream to use * @return The value columns' indices */ rmm::device_uvector<NodeIndexT> get_values_column_indices(TreeDepthT const row_array_children_level, tree_meta_t const& d_tree, device_span<NodeIndexT> col_ids, size_type const num_columns, rmm::cuda_stream_view stream) { CUDF_FUNC_RANGE(); auto [level2_nodes, level2_indices] = get_array_children_indices( row_array_children_level, d_tree.node_levels, d_tree.parent_node_ids, stream); auto col_id_location = thrust::make_permutation_iterator(col_ids.begin(), level2_nodes.begin()); rmm::device_uvector<NodeIndexT> values_column_indices(num_columns, stream); thrust::scatter(rmm::exec_policy(stream), level2_indices.begin(), level2_indices.end(), col_id_location, values_column_indices.begin()); return values_column_indices; } /** * @brief Copies strings specified by pair of begin, end offsets to host vector of strings. 
* * @param input String device buffer * @param node_range_begin Begin offset of the strings * @param node_range_end End offset of the strings * @param stream CUDA stream * @return Vector of strings */ std::vector<std::string> copy_strings_to_host(device_span<SymbolT const> input, device_span<SymbolOffsetT const> node_range_begin, device_span<SymbolOffsetT const> node_range_end, rmm::cuda_stream_view stream) { CUDF_FUNC_RANGE(); auto const num_strings = node_range_begin.size(); rmm::device_uvector<thrust::pair<char const*, size_type>> string_views(num_strings, stream); auto d_offset_pairs = thrust::make_zip_iterator(node_range_begin.begin(), node_range_end.begin()); thrust::transform(rmm::exec_policy(stream), d_offset_pairs, d_offset_pairs + num_strings, string_views.begin(), [data = input.data()] __device__(auto const& offsets) { // Note: first character for non-field columns return thrust::make_pair( data + thrust::get<0>(offsets), static_cast<size_type>(thrust::get<1>(offsets) - thrust::get<0>(offsets))); }); cudf::io::parse_options_view options_view{}; options_view.quotechar = '\0'; // no quotes options_view.keepquotes = true; auto d_column_names = parse_data(string_views.begin(), num_strings, data_type{type_id::STRING}, rmm::device_buffer{}, 0, options_view, stream, rmm::mr::get_current_device_resource()); auto to_host = [stream](auto const& col) { if (col.is_empty()) return std::vector<std::string>{}; auto const scv = cudf::strings_column_view(col); auto const h_chars = cudf::detail::make_std_vector_sync<char>( cudf::device_span<char const>(scv.chars().data<char>(), scv.chars().size()), stream); auto const h_offsets = cudf::detail::make_std_vector_sync( cudf::device_span<cudf::size_type const>(scv.offsets().data<cudf::size_type>() + scv.offset(), scv.size() + 1), stream); // build std::string vector from chars and offsets std::vector<std::string> host_data; host_data.reserve(col.size()); std::transform( std::begin(h_offsets), std::end(h_offsets) - 1, std::begin(h_offsets) + 1, std::back_inserter(host_data), [&](auto start, auto end) { return std::string(h_chars.data() + start, end - start); }); return host_data; }; return to_host(d_column_names->view()); } /** * @brief Holds member data pointers of `d_json_column` * */ struct json_column_data { using row_offset_t = json_column::row_offset_t; row_offset_t* string_offsets; row_offset_t* string_lengths; row_offset_t* child_offsets; bitmask_type* validity; }; /** * @brief Constructs `d_json_column` from node tree representation * Newly constructed columns are insert into `root`'s children. * `root` must be a list type. 
* * @param input Input JSON string device data * @param tree Node tree representation of the JSON string * @param col_ids Column ids of the nodes in the tree * @param row_offsets Row offsets of the nodes in the tree * @param root Root node of the `d_json_column` tree * @param is_array_of_arrays Whether the tree is an array of arrays * @param is_enabled_lines Whether the input is a line-delimited JSON * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the device memory * of child_offets and validity members of `d_json_column` */ void make_device_json_column(device_span<SymbolT const> input, tree_meta_t& tree, device_span<NodeIndexT> col_ids, device_span<size_type> row_offsets, device_json_column& root, bool is_array_of_arrays, bool is_enabled_lines, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); auto num_nodes = col_ids.size(); rmm::device_uvector<NodeIndexT> sorted_col_ids(col_ids.size(), stream); // make a copy thrust::copy(rmm::exec_policy(stream), col_ids.begin(), col_ids.end(), sorted_col_ids.begin()); // sort by {col_id} on {node_ids} stable rmm::device_uvector<NodeIndexT> node_ids(col_ids.size(), stream); thrust::sequence(rmm::exec_policy(stream), node_ids.begin(), node_ids.end()); thrust::stable_sort_by_key( rmm::exec_policy(stream), sorted_col_ids.begin(), sorted_col_ids.end(), node_ids.begin()); NodeIndexT const row_array_parent_col_id = [&]() { if (!is_array_of_arrays) return parent_node_sentinel; auto const list_node_index = is_enabled_lines ? 0 : 1; NodeIndexT value; CUDF_CUDA_TRY(cudaMemcpyAsync(&value, col_ids.data() + list_node_index, sizeof(NodeIndexT), cudaMemcpyDefault, stream.value())); stream.synchronize(); return value; }(); // 1. gather column information. auto [d_column_tree, d_unique_col_ids, d_max_row_offsets] = reduce_to_column_tree(tree, col_ids, sorted_col_ids, node_ids, row_offsets, is_array_of_arrays, row_array_parent_col_id, stream); auto num_columns = d_unique_col_ids.size(); auto unique_col_ids = cudf::detail::make_std_vector_async(d_unique_col_ids, stream); auto column_categories = cudf::detail::make_std_vector_async(d_column_tree.node_categories, stream); auto column_parent_ids = cudf::detail::make_std_vector_async(d_column_tree.parent_node_ids, stream); auto column_range_beg = cudf::detail::make_std_vector_async(d_column_tree.node_range_begin, stream); auto max_row_offsets = cudf::detail::make_std_vector_async(d_max_row_offsets, stream); std::vector<std::string> column_names = copy_strings_to_host( input, d_column_tree.node_range_begin, d_column_tree.node_range_end, stream); // array of arrays column names if (is_array_of_arrays) { TreeDepthT const row_array_children_level = is_enabled_lines ? 1 : 2; auto values_column_indices = get_values_column_indices(row_array_children_level, tree, col_ids, num_columns, stream); auto h_values_column_indices = cudf::detail::make_std_vector_async(values_column_indices, stream); std::transform(unique_col_ids.begin(), unique_col_ids.end(), column_names.begin(), column_names.begin(), [&h_values_column_indices, &column_parent_ids, row_array_parent_col_id]( auto col_id, auto name) mutable { return column_parent_ids[col_id] == row_array_parent_col_id ? 
std::to_string(h_values_column_indices[col_id]) : name; }); } auto to_json_col_type = [](auto category) { switch (category) { case NC_STRUCT: return json_col_t::StructColumn; case NC_LIST: return json_col_t::ListColumn; case NC_STR: [[fallthrough]]; case NC_VAL: return json_col_t::StringColumn; default: return json_col_t::Unknown; } }; auto init_to_zero = [stream](auto& v) { thrust::uninitialized_fill(rmm::exec_policy(stream), v.begin(), v.end(), 0); }; auto initialize_json_columns = [&](auto i, auto& col) { if (column_categories[i] == NC_ERR || column_categories[i] == NC_FN) { return; } else if (column_categories[i] == NC_VAL || column_categories[i] == NC_STR) { col.string_offsets.resize(max_row_offsets[i] + 1, stream); col.string_lengths.resize(max_row_offsets[i] + 1, stream); init_to_zero(col.string_offsets); init_to_zero(col.string_lengths); } else if (column_categories[i] == NC_LIST) { col.child_offsets.resize(max_row_offsets[i] + 2, stream); init_to_zero(col.child_offsets); } col.num_rows = max_row_offsets[i] + 1; col.validity = cudf::detail::create_null_mask(col.num_rows, cudf::mask_state::ALL_NULL, stream, mr); col.type = to_json_col_type(column_categories[i]); }; // 2. generate nested columns tree and its device_memory // reorder unique_col_ids w.r.t. column_range_begin for order of column to be in field order. auto h_range_col_id_it = thrust::make_zip_iterator(column_range_beg.begin(), unique_col_ids.begin()); std::sort(h_range_col_id_it, h_range_col_id_it + num_columns, [](auto const& a, auto const& b) { return thrust::get<0>(a) < thrust::get<0>(b); }); // use hash map because we may skip field name's col_ids std::unordered_map<NodeIndexT, std::reference_wrapper<device_json_column>> columns; // map{parent_col_id, child_col_name}> = child_col_id, used for null value column tracking std::map<std::pair<NodeIndexT, std::string>, NodeIndexT> mapped_columns; // find column_ids which are values, but should be ignored in validity std::vector<uint8_t> ignore_vals(num_columns, 0); columns.try_emplace(parent_node_sentinel, std::ref(root)); for (auto const this_col_id : unique_col_ids) { if (column_categories[this_col_id] == NC_ERR || column_categories[this_col_id] == NC_FN) { continue; } // Struct, List, String, Value std::string name = ""; auto parent_col_id = column_parent_ids[this_col_id]; if (parent_col_id == parent_node_sentinel || column_categories[parent_col_id] == NC_LIST) { if (is_array_of_arrays && parent_col_id == row_array_parent_col_id) { name = column_names[this_col_id]; } else { name = list_child_name; } } else if (column_categories[parent_col_id] == NC_FN) { auto field_name_col_id = parent_col_id; parent_col_id = column_parent_ids[parent_col_id]; name = column_names[field_name_col_id]; } else { CUDF_FAIL("Unexpected parent column category"); } // If the child is already found, // replace if this column is a nested column and the existing was a value column // ignore this column if this column is a value column and the existing was a nested column auto it = columns.find(parent_col_id); CUDF_EXPECTS(it != columns.end(), "Parent column not found"); auto& parent_col = it->second.get(); bool replaced = false; if (mapped_columns.count({parent_col_id, name}) > 0) { if (column_categories[this_col_id] == NC_VAL || column_categories[this_col_id] == NC_STR) { ignore_vals[this_col_id] = 1; continue; } auto old_col_id = mapped_columns[{parent_col_id, name}]; if (column_categories[old_col_id] == NC_VAL || column_categories[old_col_id] == NC_STR) { // remap ignore_vals[old_col_id] = 1; 
mapped_columns.erase({parent_col_id, name}); columns.erase(old_col_id); parent_col.child_columns.erase(name); replaced = true; // to skip duplicate name in column_order } else { // If this is a nested column but we're trying to insert either (a) a list node into a // struct column or (b) a struct node into a list column, we fail CUDF_EXPECTS(not((column_categories[old_col_id] == NC_LIST and column_categories[this_col_id] == NC_STRUCT) or (column_categories[old_col_id] == NC_STRUCT and column_categories[this_col_id] == NC_LIST)), "A mix of lists and structs within the same column is not supported"); } } CUDF_EXPECTS(parent_col.child_columns.count(name) == 0, "duplicate column name: " + name); // move into parent device_json_column col(stream, mr); initialize_json_columns(this_col_id, col); auto inserted = parent_col.child_columns.try_emplace(name, std::move(col)).second; CUDF_EXPECTS(inserted, "child column insertion failed, duplicate column name in the parent"); if (not replaced) parent_col.column_order.push_back(name); columns.try_emplace(this_col_id, std::ref(parent_col.child_columns.at(name))); mapped_columns.try_emplace(std::make_pair(parent_col_id, name), this_col_id); } // restore unique_col_ids order std::sort(h_range_col_id_it, h_range_col_id_it + num_columns, [](auto const& a, auto const& b) { return thrust::get<1>(a) < thrust::get<1>(b); }); // move columns data to device. std::vector<json_column_data> columns_data(num_columns); for (auto& [col_id, col_ref] : columns) { if (col_id == parent_node_sentinel) continue; auto& col = col_ref.get(); columns_data[col_id] = json_column_data{col.string_offsets.data(), col.string_lengths.data(), col.child_offsets.data(), static_cast<bitmask_type*>(col.validity.data())}; } auto d_ignore_vals = cudf::detail::make_device_uvector_async( ignore_vals, stream, rmm::mr::get_current_device_resource()); auto d_columns_data = cudf::detail::make_device_uvector_async( columns_data, stream, rmm::mr::get_current_device_resource()); // 3. scatter string offsets to respective columns, set validity bits thrust::for_each_n( rmm::exec_policy(stream), thrust::counting_iterator<size_type>(0), num_nodes, [node_categories = tree.node_categories.begin(), col_ids = col_ids.begin(), row_offsets = row_offsets.begin(), range_begin = tree.node_range_begin.begin(), range_end = tree.node_range_end.begin(), d_ignore_vals = d_ignore_vals.begin(), d_columns_data = d_columns_data.begin()] __device__(size_type i) { switch (node_categories[i]) { case NC_STRUCT: set_bit(d_columns_data[col_ids[i]].validity, row_offsets[i]); break; case NC_LIST: set_bit(d_columns_data[col_ids[i]].validity, row_offsets[i]); break; case NC_STR: [[fallthrough]]; case NC_VAL: if (d_ignore_vals[col_ids[i]]) break; set_bit(d_columns_data[col_ids[i]].validity, row_offsets[i]); d_columns_data[col_ids[i]].string_offsets[row_offsets[i]] = range_begin[i]; d_columns_data[col_ids[i]].string_lengths[row_offsets[i]] = range_end[i] - range_begin[i]; break; default: break; } }); // 4. scatter List offset // copy_if only node's whose parent is list, (node_id, parent_col_id) // stable_sort by parent_col_id of {node_id}. // For all unique parent_node_id of (i==0, i-1!=i), write start offset. // (i==last, i+1!=i), write end offset. 
// unique_copy_by_key {parent_node_id} {row_offset} to // col[parent_col_id].child_offsets[row_offset[parent_node_id]] auto& parent_col_ids = sorted_col_ids; // reuse sorted_col_ids auto parent_col_id = thrust::make_transform_iterator( thrust::make_counting_iterator<size_type>(0), [col_ids = col_ids.begin(), parent_node_ids = tree.parent_node_ids.begin()] __device__(size_type node_id) { return parent_node_ids[node_id] == parent_node_sentinel ? parent_node_sentinel : col_ids[parent_node_ids[node_id]]; }); auto const list_children_end = thrust::copy_if( rmm::exec_policy(stream), thrust::make_zip_iterator(thrust::make_counting_iterator<size_type>(0), parent_col_id), thrust::make_zip_iterator(thrust::make_counting_iterator<size_type>(0), parent_col_id) + num_nodes, thrust::make_counting_iterator<size_type>(0), thrust::make_zip_iterator(node_ids.begin(), parent_col_ids.begin()), [node_categories = tree.node_categories.begin(), parent_node_ids = tree.parent_node_ids.begin()] __device__(size_type node_id) { auto parent_node_id = parent_node_ids[node_id]; return parent_node_id != parent_node_sentinel and node_categories[parent_node_id] == NC_LIST; }); auto const num_list_children = list_children_end - thrust::make_zip_iterator(node_ids.begin(), parent_col_ids.begin()); thrust::stable_sort_by_key(rmm::exec_policy(stream), parent_col_ids.begin(), parent_col_ids.begin() + num_list_children, node_ids.begin()); thrust::for_each_n( rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), num_list_children, [node_ids = node_ids.begin(), parent_node_ids = tree.parent_node_ids.begin(), parent_col_ids = parent_col_ids.begin(), row_offsets = row_offsets.begin(), d_columns_data = d_columns_data.begin(), num_list_children] __device__(size_type i) { auto const node_id = node_ids[i]; auto const parent_node_id = parent_node_ids[node_id]; // scatter to list_offset if (i == 0 or parent_node_ids[node_ids[i - 1]] != parent_node_id) { d_columns_data[parent_col_ids[i]].child_offsets[row_offsets[parent_node_id]] = row_offsets[node_id]; } // last value of list child_offset is its size. if (i == num_list_children - 1 or parent_node_ids[node_ids[i + 1]] != parent_node_id) { d_columns_data[parent_col_ids[i]].child_offsets[row_offsets[parent_node_id] + 1] = row_offsets[node_id] + 1; } }); // 5. scan on offsets. 
for (auto& [id, col_ref] : columns) { auto& col = col_ref.get(); if (col.type == json_col_t::StringColumn) { thrust::inclusive_scan(rmm::exec_policy(stream), col.string_offsets.begin(), col.string_offsets.end(), col.string_offsets.begin(), thrust::maximum<json_column::row_offset_t>{}); } else if (col.type == json_col_t::ListColumn) { thrust::inclusive_scan(rmm::exec_policy(stream), col.child_offsets.begin(), col.child_offsets.end(), col.child_offsets.begin(), thrust::maximum<json_column::row_offset_t>{}); } } } /** * @brief Retrieves the parse_options to be used for type inference and type casting * * @param options The reader options to influence the relevant type inference and type casting * options */ cudf::io::parse_options parsing_options(cudf::io::json_reader_options const& options, rmm::cuda_stream_view stream); std::pair<std::unique_ptr<column>, std::vector<column_name_info>> device_json_column_to_cudf_column( device_json_column& json_col, device_span<SymbolT const> d_input, cudf::io::parse_options const& options, std::optional<schema_element> schema, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); auto validity_size_check = [](device_json_column& json_col) { CUDF_EXPECTS(json_col.validity.size() >= bitmask_allocation_size_bytes(json_col.num_rows), "valid_count is too small"); }; auto make_validity = [stream, validity_size_check]( device_json_column& json_col) -> std::pair<rmm::device_buffer, size_type> { validity_size_check(json_col); auto null_count = cudf::detail::null_count( static_cast<bitmask_type*>(json_col.validity.data()), 0, json_col.num_rows, stream); // full null_mask is always required for parse_data return {std::move(json_col.validity), null_count}; // Note: json_col modified here, moves this memory }; auto get_child_schema = [schema](auto child_name) -> std::optional<schema_element> { if (schema.has_value()) { auto const result = schema.value().child_types.find(child_name); if (result != std::end(schema.value().child_types)) { return result->second; } } return {}; }; switch (json_col.type) { case json_col_t::StringColumn: { // move string_offsets to GPU and transform to string column auto const col_size = json_col.string_offsets.size(); using char_length_pair_t = thrust::pair<char const*, size_type>; CUDF_EXPECTS(json_col.string_offsets.size() == json_col.string_lengths.size(), "string offset, string length mismatch"); rmm::device_uvector<char_length_pair_t> d_string_data(col_size, stream); // TODO how about directly storing pair<char*, size_t> in json_column? auto offset_length_it = thrust::make_zip_iterator(json_col.string_offsets.begin(), json_col.string_lengths.begin()); // Prepare iterator that returns (string_offset, string_length)-pairs needed by inference auto string_ranges_it = thrust::make_transform_iterator(offset_length_it, [] __device__(auto ip) { return thrust::pair<json_column::row_offset_t, std::size_t>{ thrust::get<0>(ip), static_cast<std::size_t>(thrust::get<1>(ip))}; }); // Prepare iterator that returns (string_ptr, string_length)-pairs needed by type conversion auto string_spans_it = thrust::make_transform_iterator( offset_length_it, [data = d_input.data()] __device__(auto ip) { return thrust::pair<char const*, std::size_t>{ data + thrust::get<0>(ip), static_cast<std::size_t>(thrust::get<1>(ip))}; }); data_type target_type{}; if (schema.has_value()) { #ifdef NJP_DEBUG_PRINT std::cout << "-> explicit type: " << (schema.has_value() ? 
std::to_string(static_cast<int>(schema->type.id())) : "n/a"); #endif target_type = schema.value().type; } // Infer column type, if we don't have an explicit type for it else { target_type = cudf::io::detail::infer_data_type( options.json_view(), d_input, string_ranges_it, col_size, stream); } auto [result_bitmask, null_count] = make_validity(json_col); // Convert strings to the inferred data type auto col = parse_data(string_spans_it, col_size, target_type, std::move(result_bitmask), null_count, options.view(), stream, mr); // Reset nullable if we do not have nulls // This is to match the existing JSON reader's behaviour: // - Non-string columns will always be returned as nullable // - String columns will be returned as nullable, iff there's at least one null entry if (target_type.id() == type_id::STRING and col->null_count() == 0) { col->set_null_mask(rmm::device_buffer{0, stream, mr}, 0); } // For string columns return ["offsets", "char"] schema if (target_type.id() == type_id::STRING) { return {std::move(col), std::vector<column_name_info>{{"offsets"}, {"chars"}}}; } // Non-string leaf-columns (e.g., numeric) do not have child columns in the schema return {std::move(col), std::vector<column_name_info>{}}; } case json_col_t::StructColumn: { std::vector<std::unique_ptr<column>> child_columns; std::vector<column_name_info> column_names{}; size_type num_rows{json_col.num_rows}; // Create children columns for (auto const& col_name : json_col.column_order) { auto const& col = json_col.child_columns.find(col_name); column_names.emplace_back(col->first); auto& child_col = col->second; auto [child_column, names] = device_json_column_to_cudf_column( child_col, d_input, options, get_child_schema(col_name), stream, mr); CUDF_EXPECTS(num_rows == child_column->size(), "All children columns must have the same size"); child_columns.push_back(std::move(child_column)); column_names.back().children = names; } auto [result_bitmask, null_count] = make_validity(json_col); // The null_mask is set after creation of struct column is to skip the superimpose_nulls and // null validation applied in make_structs_column factory, which is not needed for json auto ret_col = make_structs_column(num_rows, std::move(child_columns), 0, {}, stream, mr); ret_col->set_null_mask(std::move(result_bitmask), null_count); return {std::move(ret_col), column_names}; } case json_col_t::ListColumn: { size_type num_rows = json_col.child_offsets.size() - 1; std::vector<column_name_info> column_names{}; column_names.emplace_back("offsets"); column_names.emplace_back( json_col.child_columns.empty() ? list_child_name : json_col.child_columns.begin()->first); // Note: json_col modified here, reuse the memory auto offsets_column = std::make_unique<column>(data_type{type_id::INT32}, num_rows + 1, json_col.child_offsets.release(), rmm::device_buffer{}, 0); // Create children column auto [child_column, names] = json_col.child_columns.empty() ? std::pair<std::unique_ptr<column>, // EMPTY type could not used because gather throws exception on EMPTY type. 
std::vector<column_name_info>>{std::make_unique<column>( data_type{type_id::INT8}, 0, rmm::device_buffer{}, rmm::device_buffer{}, 0), std::vector<column_name_info>{}} : device_json_column_to_cudf_column( json_col.child_columns.begin()->second, d_input, options, get_child_schema(json_col.child_columns.begin()->first), stream, mr); column_names.back().children = names; auto [result_bitmask, null_count] = make_validity(json_col); auto ret_col = make_lists_column(num_rows, std::move(offsets_column), std::move(child_column), 0, rmm::device_buffer{0, stream, mr}, stream, mr); // The null_mask is set after creation of list column is to skip the purge_nonempty_nulls and // null validation applied in make_lists_column factory, which is not needed for json // parent column cannot be null when its children is non-empty in JSON ret_col->set_null_mask(std::move(result_bitmask), null_count); return {std::move(ret_col), std::move(column_names)}; } default: CUDF_FAIL("Unsupported column type"); break; } } table_with_metadata device_parse_nested_json(device_span<SymbolT const> d_input, cudf::io::json_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); auto gpu_tree = [&]() { // Parse the JSON and get the token stream const auto [tokens_gpu, token_indices_gpu] = get_token_stream(d_input, options, stream, rmm::mr::get_current_device_resource()); // gpu tree generation return get_tree_representation( tokens_gpu, token_indices_gpu, stream, rmm::mr::get_current_device_resource()); }(); // IILE used to free memory of token data. #ifdef NJP_DEBUG_PRINT auto h_input = cudf::detail::make_host_vector_async(d_input, stream); print_tree(h_input, gpu_tree, stream); #endif bool const is_array_of_arrays = [&]() { std::array<node_t, 2> h_node_categories = {NC_ERR, NC_ERR}; auto const size_to_copy = std::min(size_t{2}, gpu_tree.node_categories.size()); CUDF_CUDA_TRY(cudaMemcpyAsync(h_node_categories.data(), gpu_tree.node_categories.data(), sizeof(node_t) * size_to_copy, cudaMemcpyDefault, stream.value())); stream.synchronize(); if (options.is_enabled_lines()) return h_node_categories[0] == NC_LIST; return h_node_categories[0] == NC_LIST and h_node_categories[1] == NC_LIST; }(); auto [gpu_col_id, gpu_row_offsets] = records_orient_tree_traversal(d_input, gpu_tree, is_array_of_arrays, options.is_enabled_lines(), stream, rmm::mr::get_current_device_resource()); device_json_column root_column(stream, mr); root_column.type = json_col_t::ListColumn; root_column.child_offsets.resize(2, stream); thrust::fill(rmm::exec_policy(stream), root_column.child_offsets.begin(), root_column.child_offsets.end(), 0); // Get internal JSON column make_device_json_column(d_input, gpu_tree, gpu_col_id, gpu_row_offsets, root_column, is_array_of_arrays, options.is_enabled_lines(), stream, mr); // data_root refers to the root column of the data represented by the given JSON string auto& data_root = options.is_enabled_lines() ? 
root_column : root_column.child_columns.begin()->second; // Zero row entries if (data_root.type == json_col_t::ListColumn && data_root.child_columns.empty()) { return table_with_metadata{std::make_unique<table>(std::vector<std::unique_ptr<column>>{})}; } // Verify that we were in fact given a list of structs (or in JSON speech: an array of objects) auto constexpr single_child_col_count = 1; CUDF_EXPECTS(data_root.type == json_col_t::ListColumn and data_root.child_columns.size() == single_child_col_count and data_root.child_columns.begin()->second.type == (is_array_of_arrays ? json_col_t::ListColumn : json_col_t::StructColumn), "Input needs to be an array of arrays or an array of (nested) objects"); // Slice off the root list column, which has only a single row that contains all the structs auto& root_struct_col = data_root.child_columns.begin()->second; // Initialize meta data to be populated while recursing through the tree of columns std::vector<std::unique_ptr<column>> out_columns; std::vector<column_name_info> out_column_names; auto parse_opt = parsing_options(options, stream); // Iterate over the struct's child columns and convert to cudf column size_type column_index = 0; for (auto const& col_name : root_struct_col.column_order) { auto& json_col = root_struct_col.child_columns.find(col_name)->second; // Insert this columns name into the schema out_column_names.emplace_back(col_name); std::optional<schema_element> child_schema_element = std::visit( cudf::detail::visitor_overload{ [column_index](std::vector<data_type> const& user_dtypes) -> std::optional<schema_element> { return (static_cast<std::size_t>(column_index) < user_dtypes.size()) ? std::optional<schema_element>{{user_dtypes[column_index]}} : std::optional<schema_element>{}; }, [col_name]( std::map<std::string, data_type> const& user_dtypes) -> std::optional<schema_element> { return (user_dtypes.find(col_name) != std::end(user_dtypes)) ? std::optional<schema_element>{{user_dtypes.find(col_name)->second}} : std::optional<schema_element>{}; }, [col_name](std::map<std::string, schema_element> const& user_dtypes) -> std::optional<schema_element> { return (user_dtypes.find(col_name) != std::end(user_dtypes)) ? user_dtypes.find(col_name)->second : std::optional<schema_element>{}; }}, options.get_dtypes()); #ifdef NJP_DEBUG_PRINT auto debug_schema_print = [](auto ret) { std::cout << ", type id: " << (ret.has_value() ? std::to_string(static_cast<int>(ret->type.id())) : "n/a") << ", with " << (ret.has_value() ? 
ret->child_types.size() : 0) << " children" << "\n"; }; std::visit( cudf::detail::visitor_overload{[column_index](std::vector<data_type> const&) { std::cout << "Column by index: #" << column_index; }, [col_name](std::map<std::string, data_type> const&) { std::cout << "Column by flat name: '" << col_name; }, [col_name](std::map<std::string, schema_element> const&) { std::cout << "Column by nested name: #" << col_name; }}, options.get_dtypes()); debug_schema_print(child_schema_element); #endif // Get this JSON column's cudf column and schema info, (modifies json_col) auto [cudf_col, col_name_info] = device_json_column_to_cudf_column( json_col, d_input, parse_opt, child_schema_element, stream, mr); // TODO: RangeIndex as DataFrame.columns names for array of arrays // if (is_array_of_arrays) { // col_name_info.back().name = ""; // } out_column_names.back().children = std::move(col_name_info); out_columns.emplace_back(std::move(cudf_col)); column_index++; } return table_with_metadata{std::make_unique<table>(std::move(out_columns)), {out_column_names}}; } } // namespace cudf::io::json::detail
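// ---------------------------------------------------------------------------
// Illustrative sketch (separate from the files above): the reduce_by_key in
// step 3 of reduce_to_column_tree merges the node categories of all nodes
// mapped to one column with a custom operator (v+v=v, *+*=*, *+v=*, *+#=E).
// A host-side restatement of that merge rule, using a local stand-in enum
// for the NodeT categories:
#include <cassert>

enum Cat { STRUCT, LIST, STR, VAL, FN, ERR };  // stand-in for the NC_* values

Cat merge_category(Cat a, Cat b) {
  auto is_leaf = [](Cat c) { return c == VAL || c == STR; };
  if (a == b) return a;                                            // v+v=v, *+*=*
  if (is_leaf(a)) return b == FN ? ERR : (is_leaf(b) ? STR : b);   // *+v=*
  if (is_leaf(b)) return a == FN ? ERR : (is_leaf(a) ? STR : a);
  return ERR;                                                      // *+#=E
}

int main() {
  assert(merge_category(VAL, STR) == STR);        // two leaf kinds collapse to STR
  assert(merge_category(STRUCT, VAL) == STRUCT);  // nested category wins over a leaf
  assert(merge_category(STRUCT, LIST) == ERR);    // struct vs. list is an error
  assert(merge_category(VAL, FN) == ERR);         // leaf vs. field name is an error
  return 0;
}
// ---------------------------------------------------------------------------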
ba9ba914bd78243309cda640594df23efe2d5ac7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected], created on 30.11.17. // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/col2im.h> #include <PointersManager.h> namespace nd4j { namespace ops { namespace helpers { ////////////////////////////////////////////////////////////////////////// // columns [bS, iC, kH, kW, oH, oW] to be de-convoluted to image [bS, iC, iH, iW] template <typename T> static __global__ void col2imCuda(const void* columns, const Nd4jLong* colShapeInfo, void* image, const Nd4jLong* imShapeInfo, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW) { const T* col = reinterpret_cast<const T*>(columns); T* im = reinterpret_cast<T*>(image); __shared__ uint kH, kW, oH, oW, *sharedMem; __shared__ Nd4jLong imLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<uint*>(shmem); kH = dH * (colShapeInfo[3] - 1) + 1; kW = dW * (colShapeInfo[4] - 1) + 1; oH = colShapeInfo[5]; oW = colShapeInfo[6]; imLen = shape::length(imShapeInfo); } __syncthreads(); auto coords = sharedMem + threadIdx.x * 6; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < imLen; i += gridDim.x * blockDim.x) { shape::index2coords(i, imShapeInfo, coords); const auto imOffset = shape::getOffset(imShapeInfo, coords); const auto bSiCoffset = coords[0] * colShapeInfo[7] + coords[1] * colShapeInfo[8]; const uint imH = coords[2] + pH; const uint imW = coords[3] + pW; const uint colHstart = (imH < kH) ? 0 : (imH - kH) / sH + 1; const uint colWstart = (imW < kW) ? 
0 : (imW - kW) / sW + 1; const uint colHend = nd4j::math::nd4j_min<uint>(imH / sH + 1, oH); const uint colWend = nd4j::math::nd4j_min<uint>(imW / sW + 1, oW); T val = 0; for(coords[4] = colHstart; coords[4] < colHend; ++coords[4]) { coords[2] = imH - coords[4] * sH; if(coords[2] % dH != 0) continue; for(coords[5] = colWstart; coords[5] < colWend; ++coords[5]) { coords[3] = imW - coords[5] * sW; if(coords[3] % dW != 0) continue; val += col[bSiCoffset + (coords[2]/dH)*colShapeInfo[9] + (coords[3]/dW)*colShapeInfo[10] + coords[4]*colShapeInfo[11] + coords[5]*colShapeInfo[12]]; } } im[imOffset] = val; } } //////////////////////////////////////////////////////////////////////// // columns [bS, iC, kH, kW, oH, oW] to be de-convoluted to image [bS, iC, iH, iW] template<typename T> __global__ static void col2imCuda2(const void *columns, void *image, const Nd4jLong *colShapeInfo, const Nd4jLong *imShapeInfo, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW) { const auto col = reinterpret_cast<const T*>(columns); auto im = reinterpret_cast<T*>(image); auto colShape = shape::shapeOf(const_cast<Nd4jLong *>(colShapeInfo)); auto colStride = shape::stride(const_cast<Nd4jLong *>(colShapeInfo)); int colStride0 = colStride[0]; int colStride1 = colStride[1]; int colStride2 = colStride[2]; int colStride3 = colStride[3]; int colStride4 = colStride[4]; int colStride5 = colStride[5]; int kH = colShape[2]; int kW = colShape[3]; auto imShape = shape::shapeOf(const_cast<Nd4jLong *>(imShapeInfo)); auto imOrder = shape::order(const_cast<Nd4jLong *>(imShapeInfo)); auto imStride = shape::stride(const_cast<Nd4jLong *>(imShapeInfo)); int bS = imShape[0]; int iC = imShape[1]; int iH = imShape[2]; int iW = imShape[3]; int oH = colShape[4];//(iH + 2 * pH - kH) / sW + 1; int oW = colShape[5];//(iW + 2 * pW - kW) / sH + 1; int n = bS * iC * iH * iW; //Effective kernel size, accounting for dilation int kHeff = kH + (kH - 1) * (dH - 1); int kWeff = kW + (kW - 1) * (dW - 1); for (int i = (blockDim.x * blockIdx.x) + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { T val = 0; int w_im = i % iW + pW; int h_im = (i / iW) % iH + pH; int c_im = i / (iW * iH); int b = c_im / iC; int c = c_im % iC; // compute the start and end of the output // These are the indexes for dimensions ??? in the 6d col matrix int w_col_start = (w_im < kWeff) ? 0 : (w_im - kWeff) / sW + 1; int w_col_end = nd4j::math::nd4j_min<int>(w_im / sW + 1, oW); int h_col_start = (h_im < kHeff) ? 0 : (h_im - kHeff) / sH + 1; int h_col_end = nd4j::math::nd4j_min<int>(h_im / sH + 1, oH); //Iterate over col entries in the 6d array... 
these are added up for (int colH = h_col_start; colH < h_col_end; colH += 1) { for (int colW = w_col_start; colW < w_col_end; colW += 1) { int kRow = (h_im - colH * sH); int kCol = (w_im - colW * sW); if(kRow % dH == 0 && kCol % dW == 0){ kRow /= dH; kCol /= dW; int data_col_index = b * colStride0 + c * colStride1 + kRow * colStride2 + kCol * colStride3 + colH * colStride4 + colW * colStride5; val += col[data_col_index]; } } } int i_f = 0; int i_c = i; for (int dim = 3; dim >= 0; dim--) { i_f += (i_c % imShape[dim]) * imStride[dim]; i_c = i_c / imShape[dim]; } im[i_f] = val; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void col2imCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* columns, const Nd4jLong* colShapeInfo, void* image, const Nd4jLong* imShapeInfo, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW) { // col2imCuda2<T><<<512, 512, 1024, *stream>>>(columns, image, colShapeInfo, imShapeInfo, sH, sW, pH, pW, dH, dW); hipLaunchKernelGGL(( col2imCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, columns, colShapeInfo, image, imShapeInfo, sH, sW, pH, pW, dH, dW); } ////////////////////////////////////////////////////////////////////////// void col2im(nd4j::LaunchContext& context, const NDArray& col, NDArray& im, const int sH, const int sW, const int pH, const int pW, const int iH, const int iW, const int dH, const int dW) { PointersManager manager(&context, "col2im"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (im.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = col.rankOf() * sizeof(uint) * threadsPerBlock + 256; NDArray::prepareSpecialUse({&im}, {&col}); BUILD_SINGLE_SELECTOR(im.dataType(), col2imCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context.getCudaStream(), col.getSpecialBuffer(), col.getSpecialShapeInfo(), im.specialBuffer(), im.specialShapeInfo(), sH, sW, pH, pW, dH, dW), FLOAT_TYPES); NDArray::registerSpecialUse({&im}, {&col}); manager.synchronize(); } } } }
ba9ba914bd78243309cda640594df23efe2d5ac7.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected], created on 30.11.17. // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/col2im.h> #include <PointersManager.h> namespace nd4j { namespace ops { namespace helpers { ////////////////////////////////////////////////////////////////////////// // columns [bS, iC, kH, kW, oH, oW] to be de-convoluted to image [bS, iC, iH, iW] template <typename T> static __global__ void col2imCuda(const void* columns, const Nd4jLong* colShapeInfo, void* image, const Nd4jLong* imShapeInfo, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW) { const T* col = reinterpret_cast<const T*>(columns); T* im = reinterpret_cast<T*>(image); __shared__ uint kH, kW, oH, oW, *sharedMem; __shared__ Nd4jLong imLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<uint*>(shmem); kH = dH * (colShapeInfo[3] - 1) + 1; kW = dW * (colShapeInfo[4] - 1) + 1; oH = colShapeInfo[5]; oW = colShapeInfo[6]; imLen = shape::length(imShapeInfo); } __syncthreads(); auto coords = sharedMem + threadIdx.x * 6; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < imLen; i += gridDim.x * blockDim.x) { shape::index2coords(i, imShapeInfo, coords); const auto imOffset = shape::getOffset(imShapeInfo, coords); const auto bSiCoffset = coords[0] * colShapeInfo[7] + coords[1] * colShapeInfo[8]; const uint imH = coords[2] + pH; const uint imW = coords[3] + pW; const uint colHstart = (imH < kH) ? 0 : (imH - kH) / sH + 1; const uint colWstart = (imW < kW) ? 
0 : (imW - kW) / sW + 1; const uint colHend = nd4j::math::nd4j_min<uint>(imH / sH + 1, oH); const uint colWend = nd4j::math::nd4j_min<uint>(imW / sW + 1, oW); T val = 0; for(coords[4] = colHstart; coords[4] < colHend; ++coords[4]) { coords[2] = imH - coords[4] * sH; if(coords[2] % dH != 0) continue; for(coords[5] = colWstart; coords[5] < colWend; ++coords[5]) { coords[3] = imW - coords[5] * sW; if(coords[3] % dW != 0) continue; val += col[bSiCoffset + (coords[2]/dH)*colShapeInfo[9] + (coords[3]/dW)*colShapeInfo[10] + coords[4]*colShapeInfo[11] + coords[5]*colShapeInfo[12]]; } } im[imOffset] = val; } } //////////////////////////////////////////////////////////////////////// // columns [bS, iC, kH, kW, oH, oW] to be de-convoluted to image [bS, iC, iH, iW] template<typename T> __global__ static void col2imCuda2(const void *columns, void *image, const Nd4jLong *colShapeInfo, const Nd4jLong *imShapeInfo, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW) { const auto col = reinterpret_cast<const T*>(columns); auto im = reinterpret_cast<T*>(image); auto colShape = shape::shapeOf(const_cast<Nd4jLong *>(colShapeInfo)); auto colStride = shape::stride(const_cast<Nd4jLong *>(colShapeInfo)); int colStride0 = colStride[0]; int colStride1 = colStride[1]; int colStride2 = colStride[2]; int colStride3 = colStride[3]; int colStride4 = colStride[4]; int colStride5 = colStride[5]; int kH = colShape[2]; int kW = colShape[3]; auto imShape = shape::shapeOf(const_cast<Nd4jLong *>(imShapeInfo)); auto imOrder = shape::order(const_cast<Nd4jLong *>(imShapeInfo)); auto imStride = shape::stride(const_cast<Nd4jLong *>(imShapeInfo)); int bS = imShape[0]; int iC = imShape[1]; int iH = imShape[2]; int iW = imShape[3]; int oH = colShape[4];//(iH + 2 * pH - kH) / sW + 1; int oW = colShape[5];//(iW + 2 * pW - kW) / sH + 1; int n = bS * iC * iH * iW; //Effective kernel size, accounting for dilation int kHeff = kH + (kH - 1) * (dH - 1); int kWeff = kW + (kW - 1) * (dW - 1); for (int i = (blockDim.x * blockIdx.x) + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { T val = 0; int w_im = i % iW + pW; int h_im = (i / iW) % iH + pH; int c_im = i / (iW * iH); int b = c_im / iC; int c = c_im % iC; // compute the start and end of the output // These are the indexes for dimensions ??? in the 6d col matrix int w_col_start = (w_im < kWeff) ? 0 : (w_im - kWeff) / sW + 1; int w_col_end = nd4j::math::nd4j_min<int>(w_im / sW + 1, oW); int h_col_start = (h_im < kHeff) ? 0 : (h_im - kHeff) / sH + 1; int h_col_end = nd4j::math::nd4j_min<int>(h_im / sH + 1, oH); //Iterate over col entries in the 6d array... 
these are added up for (int colH = h_col_start; colH < h_col_end; colH += 1) { for (int colW = w_col_start; colW < w_col_end; colW += 1) { int kRow = (h_im - colH * sH); int kCol = (w_im - colW * sW); if(kRow % dH == 0 && kCol % dW == 0){ kRow /= dH; kCol /= dW; int data_col_index = b * colStride0 + c * colStride1 + kRow * colStride2 + kCol * colStride3 + colH * colStride4 + colW * colStride5; val += col[data_col_index]; } } } int i_f = 0; int i_c = i; for (int dim = 3; dim >= 0; dim--) { i_f += (i_c % imShape[dim]) * imStride[dim]; i_c = i_c / imShape[dim]; } im[i_f] = val; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void col2imCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* columns, const Nd4jLong* colShapeInfo, void* image, const Nd4jLong* imShapeInfo, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW) { // col2imCuda2<T><<<512, 512, 1024, *stream>>>(columns, image, colShapeInfo, imShapeInfo, sH, sW, pH, pW, dH, dW); col2imCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(columns, colShapeInfo, image, imShapeInfo, sH, sW, pH, pW, dH, dW); } ////////////////////////////////////////////////////////////////////////// void col2im(nd4j::LaunchContext& context, const NDArray& col, NDArray& im, const int sH, const int sW, const int pH, const int pW, const int iH, const int iW, const int dH, const int dW) { PointersManager manager(&context, "col2im"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (im.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = col.rankOf() * sizeof(uint) * threadsPerBlock + 256; NDArray::prepareSpecialUse({&im}, {&col}); BUILD_SINGLE_SELECTOR(im.dataType(), col2imCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context.getCudaStream(), col.getSpecialBuffer(), col.getSpecialShapeInfo(), im.specialBuffer(), im.specialShapeInfo(), sH, sW, pH, pW, dH, dW), FLOAT_TYPES); NDArray::registerSpecialUse({&im}, {&col}); manager.synchronize(); } } } }
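As a cross-check of the bounds logic in the two col2im kernels above (the colHstart/colWstart computation and the dilation test), the same mapping can be written in its scatter form on the host. The sketch below is only an illustration and not part of the nd4j helpers; it assumes a single batch/channel image and a dense [kH, kW, oH, oW] column layout, which are simplifications of the strided layout the kernels actually use.

// Hypothetical CPU reference for the col2im mapping (bS = iC = 1 case).
#include <cassert>
#include <vector>

// columns laid out as [kH, kW, oH, oW]; image as [iH, iW] (assumed layout, for illustration)
static void col2imRef(const std::vector<float>& col, std::vector<float>& im,
                      int iH, int iW, int kH, int kW, int oH, int oW,
                      int sH, int sW, int pH, int pW, int dH, int dW) {
    std::fill(im.begin(), im.end(), 0.0f);
    for (int kr = 0; kr < kH; ++kr)
      for (int kc = 0; kc < kW; ++kc)
        for (int oh = 0; oh < oH; ++oh)
          for (int ow = 0; ow < oW; ++ow) {
            const int ih = oh * sH - pH + kr * dH;   // image row this column entry maps to
            const int iw = ow * sW - pW + kc * dW;   // image col this column entry maps to
            if (ih >= 0 && ih < iH && iw >= 0 && iw < iW)
              im[ih * iW + iw] += col[((kr * kW + kc) * oH + oh) * oW + ow];
          }
}

int main() {
    // 3x3 image, 2x2 kernel, stride 1, no padding/dilation -> 2x2 output positions.
    const int iH = 3, iW = 3, kH = 2, kW = 2, oH = 2, oW = 2;
    std::vector<float> col(kH * kW * oH * oW, 1.0f), im(iH * iW, 0.0f);
    col2imRef(col, im, iH, iW, kH, kW, oH, oW, 1, 1, 0, 0, 1, 1);
    assert(im[1 * iW + 1] == 4.0f);  // centre pixel is covered by all four kernel taps
    return 0;
}

The gather loops in col2imCuda enumerate exactly the (oh, ow) pairs that this scatter version would write into a given image pixel, which is what the start/end bounds and the `% dH`/`% dW` checks express.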
3e241d8d46a574fdad1d0e7c6233bf1d3c1043fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <stdio.h> #include "box2d4r-512-4-256_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 161 #define BENCH_RAD 4 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 9 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ hipError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != hipSuccess) { \ fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == hipSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(hipGetLastError()); \ } while(0) double *dev_A; cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double))); { cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice)); #ifdef STENCILBENCH hipDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 4 - 4); const AN5D_TYPE __c1Pad = (4); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 4 - 4); const AN5D_TYPE __c2Pad = (4); #define __c2 c2 const AN5D_TYPE __halo1 = 4; const AN5D_TYPE __halo2 = 4; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), 
dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 3) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = 
(__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 3) { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH hipDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost)); } cudaCheckReturn(hipFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; 
i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = 0.00930f * A[t%2][i-4][j-4] + 0.00931f * A[t%2][i-4][j-3] + 0.00932f * A[t%2][i-4][j-2] + 0.00933f * A[t%2][i-4][j-1] + 0.00934f * A[t%2][i-4][j] + 0.00935f * A[t%2][i-4][j+1] + 0.00936f * A[t%2][i-4][j+2] + 0.00937f * A[t%2][i-4][j+3] + 0.00938f * A[t%2][i-4][j+4] + 0.00939f * A[t%2][i-3][j-4] + 0.00940f * A[t%2][i-3][j-3] + 0.00941f * A[t%2][i-3][j-2] + 0.00942f * A[t%2][i-3][j-1] + 0.00943f * A[t%2][i-3][j] + 0.00944f * A[t%2][i-3][j+1] + 0.00945f * A[t%2][i-3][j+2] + 0.00946f * A[t%2][i-3][j+3] + 0.00947f * A[t%2][i-3][j+4] + 0.00948f * A[t%2][i-2][j-4] + 0.00949f * A[t%2][i-2][j-3] + 0.00950f * A[t%2][i-2][j-2] + 0.00951f * A[t%2][i-2][j-1] + 0.00952f * A[t%2][i-2][j] + 0.00953f * A[t%2][i-2][j+1] + 0.00954f * A[t%2][i-2][j+2] + 0.00955f * A[t%2][i-2][j+3] + 0.00956f * A[t%2][i-2][j+4] + 0.00957f * A[t%2][i-1][j-4] + 0.00958f * A[t%2][i-1][j-3] + 0.00959f * A[t%2][i-1][j-2] + 0.00960f * A[t%2][i-1][j-1] + 0.00961f * A[t%2][i-1][j] + 0.00962f * A[t%2][i-1][j+1] + 0.00963f * A[t%2][i-1][j+2] + 0.00964f * A[t%2][i-1][j+3] + 0.00965f * A[t%2][i-1][j+4] + 0.00966f * A[t%2][i][j-4] + 0.00967f * A[t%2][i][j-3] + 0.00968f * A[t%2][i][j-2] + 0.00969f * A[t%2][i][j-1] + 0.22400f * A[t%2][i][j] + 0.00971f * A[t%2][i][j+1] + 0.00972f * A[t%2][i][j+2] + 0.00973f * A[t%2][i][j+3] + 0.00974f * A[t%2][i][j+4] + 0.00975f * A[t%2][i+1][j-4] + 0.00976f * A[t%2][i+1][j-3] + 0.00977f * A[t%2][i+1][j-2] + 0.00978f * A[t%2][i+1][j-1] + 0.00979f * A[t%2][i+1][j] + 0.00980f * A[t%2][i+1][j+1] + 0.00981f * A[t%2][i+1][j+2] + 0.00982f * A[t%2][i+1][j+3] + 0.00983f * A[t%2][i+1][j+4] + 0.00984f * A[t%2][i+2][j-4] + 0.00985f * A[t%2][i+2][j-3] + 0.00986f * A[t%2][i+2][j-2] + 0.00987f * A[t%2][i+2][j-1] + 0.00988f * A[t%2][i+2][j] + 0.00989f * A[t%2][i+2][j+1] + 0.00990f * A[t%2][i+2][j+2] + 0.00991f * A[t%2][i+2][j+3] + 0.00992f * A[t%2][i+2][j+4] + 0.00993f * A[t%2][i+3][j-4] + 0.00994f * A[t%2][i+3][j-3] + 0.00995f * A[t%2][i+3][j-2] + 0.00996f * A[t%2][i+3][j-1] + 0.00997f * A[t%2][i+3][j] + 0.00998f * A[t%2][i+3][j+1] + 0.00999f * A[t%2][i+3][j+2] + 0.01000f * A[t%2][i+3][j+3] + 0.01001f * A[t%2][i+3][j+4] + 0.01002f * A[t%2][i+4][j-4] + 0.01003f * A[t%2][i+4][j-3] + 0.01004f * A[t%2][i+4][j-2] + 0.01005f * A[t%2][i+4][j-1] + 0.01006f * A[t%2][i+4][j] + 0.01007f * A[t%2][i+4][j+1] + 0.01008f * A[t%2][i+4][j+2] + 0.01009f * A[t%2][i+4][j+3] + 0.01010f * A[t%2][i+4][j+4]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
3e241d8d46a574fdad1d0e7c6233bf1d3c1043fe.cu
#include <assert.h> #include <stdio.h> #include "box2d4r-512-4-256_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 161 #define BENCH_RAD 4 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 9 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ cudaError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != cudaSuccess) { \ fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == cudaSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(cudaGetLastError()); \ } while(0) double *dev_A; cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double))); { cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice)); #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 4 - 4); const AN5D_TYPE __c1Pad = (4); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 4 - 4); const AN5D_TYPE __c2Pad = (4); #define __c2 c2 const AN5D_TYPE __halo1 = 4; const AN5D_TYPE __halo2 = 4; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 
= (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 3) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] 
Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 3) { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost)); } cudaCheckReturn(cudaFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = 0.00930f * A[t%2][i-4][j-4] + 0.00931f * A[t%2][i-4][j-3] + 0.00932f * A[t%2][i-4][j-2] + 0.00933f * A[t%2][i-4][j-1] + 0.00934f * A[t%2][i-4][j] + 0.00935f * A[t%2][i-4][j+1] + 0.00936f * A[t%2][i-4][j+2] + 0.00937f * A[t%2][i-4][j+3] + 0.00938f * A[t%2][i-4][j+4] + 0.00939f * A[t%2][i-3][j-4] + 0.00940f * A[t%2][i-3][j-3] + 0.00941f * A[t%2][i-3][j-2] + 0.00942f * A[t%2][i-3][j-1] + 0.00943f * 
A[t%2][i-3][j] + 0.00944f * A[t%2][i-3][j+1] + 0.00945f * A[t%2][i-3][j+2] + 0.00946f * A[t%2][i-3][j+3] + 0.00947f * A[t%2][i-3][j+4] + 0.00948f * A[t%2][i-2][j-4] + 0.00949f * A[t%2][i-2][j-3] + 0.00950f * A[t%2][i-2][j-2] + 0.00951f * A[t%2][i-2][j-1] + 0.00952f * A[t%2][i-2][j] + 0.00953f * A[t%2][i-2][j+1] + 0.00954f * A[t%2][i-2][j+2] + 0.00955f * A[t%2][i-2][j+3] + 0.00956f * A[t%2][i-2][j+4] + 0.00957f * A[t%2][i-1][j-4] + 0.00958f * A[t%2][i-1][j-3] + 0.00959f * A[t%2][i-1][j-2] + 0.00960f * A[t%2][i-1][j-1] + 0.00961f * A[t%2][i-1][j] + 0.00962f * A[t%2][i-1][j+1] + 0.00963f * A[t%2][i-1][j+2] + 0.00964f * A[t%2][i-1][j+3] + 0.00965f * A[t%2][i-1][j+4] + 0.00966f * A[t%2][i][j-4] + 0.00967f * A[t%2][i][j-3] + 0.00968f * A[t%2][i][j-2] + 0.00969f * A[t%2][i][j-1] + 0.22400f * A[t%2][i][j] + 0.00971f * A[t%2][i][j+1] + 0.00972f * A[t%2][i][j+2] + 0.00973f * A[t%2][i][j+3] + 0.00974f * A[t%2][i][j+4] + 0.00975f * A[t%2][i+1][j-4] + 0.00976f * A[t%2][i+1][j-3] + 0.00977f * A[t%2][i+1][j-2] + 0.00978f * A[t%2][i+1][j-1] + 0.00979f * A[t%2][i+1][j] + 0.00980f * A[t%2][i+1][j+1] + 0.00981f * A[t%2][i+1][j+2] + 0.00982f * A[t%2][i+1][j+3] + 0.00983f * A[t%2][i+1][j+4] + 0.00984f * A[t%2][i+2][j-4] + 0.00985f * A[t%2][i+2][j-3] + 0.00986f * A[t%2][i+2][j-2] + 0.00987f * A[t%2][i+2][j-1] + 0.00988f * A[t%2][i+2][j] + 0.00989f * A[t%2][i+2][j+1] + 0.00990f * A[t%2][i+2][j+2] + 0.00991f * A[t%2][i+2][j+3] + 0.00992f * A[t%2][i+2][j+4] + 0.00993f * A[t%2][i+3][j-4] + 0.00994f * A[t%2][i+3][j-3] + 0.00995f * A[t%2][i+3][j-2] + 0.00996f * A[t%2][i+3][j-1] + 0.00997f * A[t%2][i+3][j] + 0.00998f * A[t%2][i+3][j+1] + 0.00999f * A[t%2][i+3][j+2] + 0.01000f * A[t%2][i+3][j+3] + 0.01001f * A[t%2][i+3][j+4] + 0.01002f * A[t%2][i+4][j-4] + 0.01003f * A[t%2][i+4][j-3] + 0.01004f * A[t%2][i+4][j-2] + 0.01005f * A[t%2][i+4][j-1] + 0.01006f * A[t%2][i+4][j] + 0.01007f * A[t%2][i+4][j+1] + 0.01008f * A[t%2][i+4][j+2] + 0.01009f * A[t%2][i+4][j+3] + 0.01010f * A[t%2][i+4][j+4]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
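The tile-size constants chosen for each kernel0_N launch above all follow one rule: fusing __side0Len time steps in one pass costs __halo extra rows and columns per fused step on each side of the spatial tile. A standalone arithmetic sketch of that relation is shown below; it is an illustration only, the variable names mirror the generated code but nothing here is part of the benchmark itself.

/* Overlapped-tiling arithmetic used by the generated launches above (illustrative only). */
#include <stdio.h>

int main(void) {
    const unsigned halo = 4;                  /* BENCH_RAD */
    const unsigned side0Len = 4;              /* fused time steps per pass */
    const unsigned side1Len = 256, side2Len = 480;
    const unsigned olLen1 = halo * side0Len;  /* row overlap on each side */
    const unsigned olLen2 = halo * side0Len;  /* column overlap on each side */
    printf("tile loaded per block: %u x %u (block size %u threads)\n",
           side1Len + 2 * olLen1, side2Len + 2 * olLen2, 1 * (side2Len + 2 * olLen2));
    /* Prints 288 x 512 with 512 threads, matching the __side*LenOl / __blockSize values
       computed in the __side0Len = 4 branch of the generated code. */
    return 0;
}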
962b872cce6f8452943784e8add498e191f3ea12.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>

#include "./concat_layer.hpp"
#include "../util/math_functions.hpp"

namespace caffe {

__global__ void Concat(const int nthreads, const real_t* in_data,
    const bool forward, const int num_concats, const int concat_size,
    const int top_concat_axis, const int bottom_concat_axis,
    const int offset_concat_axis, real_t* out_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int total_concat_size = concat_size * bottom_concat_axis;
    const int concat_num = index / total_concat_size;
    const int concat_index = index % total_concat_size;
    const int top_index = concat_index +
        (concat_num * top_concat_axis + offset_concat_axis) * concat_size;
    if (forward) {
      out_data[top_index] = in_data[index];
    } else {
      out_data[index] = in_data[top_index];
    }
  }
}

void ConcatLayer::Forward_gpu(const vector<Blob*>& bottom,
      const vector<Blob*>& top) {
  if (bottom.size() == 1) { return; }
  real_t* top_data = top[0]->mutable_gpu_data();
  int offset_concat_axis = 0;
  const int top_concat_axis = top[0]->shape(concat_axis_);
  const bool kForward = true;
  for (int i = 0; i < bottom.size(); ++i) {
    const real_t* bottom_data = bottom[i]->gpu_data();
    const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
    const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
    const int nthreads = bottom_concat_size * num_concats_;
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(Concat, dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        nthreads, bottom_data, kForward, num_concats_, concat_input_size_,
        top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data);
    offset_concat_axis += bottom_concat_axis;
  }
}

}  // namespace caffe
962b872cce6f8452943784e8add498e191f3ea12.cu
#include <vector> #include "./concat_layer.hpp" #include "../util/math_functions.hpp" namespace caffe { __global__ void Concat(const int nthreads, const real_t* in_data, const bool forward, const int num_concats, const int concat_size, const int top_concat_axis, const int bottom_concat_axis, const int offset_concat_axis, real_t* out_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int total_concat_size = concat_size * bottom_concat_axis; const int concat_num = index / total_concat_size; const int concat_index = index % total_concat_size; const int top_index = concat_index + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; if (forward) { out_data[top_index] = in_data[index]; } else { out_data[index] = in_data[top_index]; } } } void ConcatLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { if (bottom.size() == 1) { return; } real_t* top_data = top[0]->mutable_gpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); const bool kForward = true; for (int i = 0; i < bottom.size(); ++i) { const real_t* bottom_data = bottom[i]->gpu_data(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); const int bottom_concat_size = bottom_concat_axis * concat_input_size_; const int nthreads = bottom_concat_size * num_concats_; Concat // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, bottom_data, kForward, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); offset_concat_axis += bottom_concat_axis; } } } // namespace caffe
7ec06526da33c2c5bbc6c11a6d7265c6de2a5e12.hip
// !!! This is a file automatically generated by hipify!!! #include "DT.cuh" DTChunk::DTChunk(int argmaxDTLength, int argMaxDocLength, int argNumChunks) { maxDTLength = argmaxDTLength; maxDocLength = argMaxDocLength; numChunks = argNumChunks; NZDTCount = new int[maxDocLength]; DTIndex = new unsigned short int[maxDTLength]; DTValue = new int[maxDTLength]; //DTCount = new int[maxDocLength]; //DTOffset = new int[maxDocLength]; DTLengthVec = new int[numChunks]; docLengthVec = new int[numChunks]; } void DTChunk::loadDocDTLength(string argFilePrefix) { ifstream DTLength((argFilePrefix + string("/DTLength.txt")).c_str(), ios::binary);//store max Doc and DT length ifstream docLength((argFilePrefix + string("/docLength.txt")).c_str(), ios::binary);//store max Doc and DT length for (int chunkId = 0; chunkId < numChunks; chunkId++) { DTLength >> DTLengthVec[chunkId]; docLength >> docLengthVec[chunkId]; } DTLength.close(); docLength.close(); } void DTChunk::CPUMemSet() { memset(NZDTCount, 0, maxDocLength * sizeof(int)); memset(DTIndex, 0, maxDTLength * sizeof(unsigned short int)); memset(DTValue, 0, maxDTLength * sizeof(int)); //memset(DTCount, 0, maxDocLength * sizeof(int)); //memset(DTOffset, 0, maxDocLength * sizeof(int)); } void DTChunk::GPUMemAllocate() { hipMalloc((void**)&deviceNZDTCount, (maxDocLength) * sizeof(int)); hipMalloc((void**)&deviceDTIndex, (maxDTLength) * sizeof(unsigned short int)); hipMalloc((void**)&deviceDTValue, (maxDTLength) * sizeof(int)); hipMalloc((void**)&deviceDTCount, (maxDocLength) * sizeof(int)); hipMalloc((void**)&deviceDTOffset, (maxDocLength) * sizeof(int)); DTMemory = (3 * maxDocLength + 2 * maxDTLength) * sizeof(int) / 1000000000.0; printf("DT memory usage:%f GB\n", DTMemory); } void DTChunk::loadDTCountOffset(string argFilePrefix) { /*chunkId = argChunkId;*/ for (int chunkId = 0; chunkId < numChunks; chunkId++) { string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId); ifstream DTCountOffset((chunkFolderName + string("/DTCountOffset.txt")).c_str(), ios::binary);//store Word offset of TL int* DTCount = new int[docLengthVec[chunkId]]; int* DTOffset = new int[docLengthVec[chunkId]]; memset(DTCount, 0, docLengthVec[chunkId] * sizeof(int)); memset(DTOffset, 0, docLengthVec[chunkId] * sizeof(int)); for (int i = 0; i < docLengthVec[chunkId]; i++) { DTCountOffset >> DTCount[i] >> DTOffset[i]; } DTCountOffset.close(); DTCountVec.push_back(DTCount); DTOffsetVec.push_back(DTOffset); } } void DTChunk::CPU2GPU(int argChunkId) { chunkId = argChunkId; //docLength = argDocLength; hipMemcpy(deviceNZDTCount, NZDTCount, (docLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(deviceDTIndex, DTIndex, (DTLengthVec[chunkId]) * sizeof(unsigned short int), hipMemcpyHostToDevice); hipMemcpy(deviceDTValue, DTValue, (DTLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice); } void DTChunk::GPUMemSet(int argChunkId) { chunkId = argChunkId; hipMemset(deviceNZDTCount, 0, (maxDocLength) * sizeof(int)); hipMemset(deviceDTIndex, 0, (maxDTLength) * sizeof(unsigned short int)); hipMemset(deviceDTValue, 0, (maxDTLength) * sizeof(int)); } void DTChunk::CPU2GPUDTCountOffset(int argChunkId) { chunkId = argChunkId; //docLength = argDocLength; hipMemcpy(deviceDTCount, DTCountVec[chunkId], (docLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(deviceDTOffset, DTOffsetVec[chunkId], (docLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice); } void DTChunk::GPU2CPU(int argChunkId) { chunkId = argChunkId; //docLength = argDocLength; 
hipMemcpy(NZDTCount, deviceNZDTCount, (docLengthVec[chunkId]) * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(DTIndex, deviceDTIndex, (DTLengthVec[chunkId]) * sizeof(unsigned short int), hipMemcpyDeviceToHost); hipMemcpy(DTValue, deviceDTValue, (DTLengthVec[chunkId]) * sizeof(int), hipMemcpyDeviceToHost); } void DTChunk::CPU2Disk(string argFilePrefix,int argChunkId) { chunkId = argChunkId; //docLength = argDocLength; string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId); ofstream OutputNZDTCount((chunkFolderName + string("/NZDTCount.txt")).c_str(), ios::binary); for (int i = 0; i < docLengthVec[chunkId]; i++) { OutputNZDTCount << NZDTCount[i] << "\n"; } OutputNZDTCount.close(); ofstream OutputDTIndexValue((chunkFolderName + string("/DTIndexValue.txt")).c_str(), ios::binary); for (int i = 0; i < DTLengthVec[chunkId]; i++) { OutputDTIndexValue << DTIndex[i] <<" "<<DTValue[i]<< "\n"; } OutputDTIndexValue.close(); }
7ec06526da33c2c5bbc6c11a6d7265c6de2a5e12.cu
#include "DT.cuh" DTChunk::DTChunk(int argmaxDTLength, int argMaxDocLength, int argNumChunks) { maxDTLength = argmaxDTLength; maxDocLength = argMaxDocLength; numChunks = argNumChunks; NZDTCount = new int[maxDocLength]; DTIndex = new unsigned short int[maxDTLength]; DTValue = new int[maxDTLength]; //DTCount = new int[maxDocLength]; //DTOffset = new int[maxDocLength]; DTLengthVec = new int[numChunks]; docLengthVec = new int[numChunks]; } void DTChunk::loadDocDTLength(string argFilePrefix) { ifstream DTLength((argFilePrefix + string("/DTLength.txt")).c_str(), ios::binary);//store max Doc and DT length ifstream docLength((argFilePrefix + string("/docLength.txt")).c_str(), ios::binary);//store max Doc and DT length for (int chunkId = 0; chunkId < numChunks; chunkId++) { DTLength >> DTLengthVec[chunkId]; docLength >> docLengthVec[chunkId]; } DTLength.close(); docLength.close(); } void DTChunk::CPUMemSet() { memset(NZDTCount, 0, maxDocLength * sizeof(int)); memset(DTIndex, 0, maxDTLength * sizeof(unsigned short int)); memset(DTValue, 0, maxDTLength * sizeof(int)); //memset(DTCount, 0, maxDocLength * sizeof(int)); //memset(DTOffset, 0, maxDocLength * sizeof(int)); } void DTChunk::GPUMemAllocate() { cudaMalloc((void**)&deviceNZDTCount, (maxDocLength) * sizeof(int)); cudaMalloc((void**)&deviceDTIndex, (maxDTLength) * sizeof(unsigned short int)); cudaMalloc((void**)&deviceDTValue, (maxDTLength) * sizeof(int)); cudaMalloc((void**)&deviceDTCount, (maxDocLength) * sizeof(int)); cudaMalloc((void**)&deviceDTOffset, (maxDocLength) * sizeof(int)); DTMemory = (3 * maxDocLength + 2 * maxDTLength) * sizeof(int) / 1000000000.0; printf("DT memory usage:%f GB\n", DTMemory); } void DTChunk::loadDTCountOffset(string argFilePrefix) { /*chunkId = argChunkId;*/ for (int chunkId = 0; chunkId < numChunks; chunkId++) { string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId); ifstream DTCountOffset((chunkFolderName + string("/DTCountOffset.txt")).c_str(), ios::binary);//store Word offset of TL int* DTCount = new int[docLengthVec[chunkId]]; int* DTOffset = new int[docLengthVec[chunkId]]; memset(DTCount, 0, docLengthVec[chunkId] * sizeof(int)); memset(DTOffset, 0, docLengthVec[chunkId] * sizeof(int)); for (int i = 0; i < docLengthVec[chunkId]; i++) { DTCountOffset >> DTCount[i] >> DTOffset[i]; } DTCountOffset.close(); DTCountVec.push_back(DTCount); DTOffsetVec.push_back(DTOffset); } } void DTChunk::CPU2GPU(int argChunkId) { chunkId = argChunkId; //docLength = argDocLength; cudaMemcpy(deviceNZDTCount, NZDTCount, (docLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(deviceDTIndex, DTIndex, (DTLengthVec[chunkId]) * sizeof(unsigned short int), cudaMemcpyHostToDevice); cudaMemcpy(deviceDTValue, DTValue, (DTLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice); } void DTChunk::GPUMemSet(int argChunkId) { chunkId = argChunkId; cudaMemset(deviceNZDTCount, 0, (maxDocLength) * sizeof(int)); cudaMemset(deviceDTIndex, 0, (maxDTLength) * sizeof(unsigned short int)); cudaMemset(deviceDTValue, 0, (maxDTLength) * sizeof(int)); } void DTChunk::CPU2GPUDTCountOffset(int argChunkId) { chunkId = argChunkId; //docLength = argDocLength; cudaMemcpy(deviceDTCount, DTCountVec[chunkId], (docLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(deviceDTOffset, DTOffsetVec[chunkId], (docLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice); } void DTChunk::GPU2CPU(int argChunkId) { chunkId = argChunkId; //docLength = argDocLength; cudaMemcpy(NZDTCount, deviceNZDTCount, 
(docLengthVec[chunkId]) * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(DTIndex, deviceDTIndex, (DTLengthVec[chunkId]) * sizeof(unsigned short int), cudaMemcpyDeviceToHost); cudaMemcpy(DTValue, deviceDTValue, (DTLengthVec[chunkId]) * sizeof(int), cudaMemcpyDeviceToHost); } void DTChunk::CPU2Disk(string argFilePrefix,int argChunkId) { chunkId = argChunkId; //docLength = argDocLength; string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId); ofstream OutputNZDTCount((chunkFolderName + string("/NZDTCount.txt")).c_str(), ios::binary); for (int i = 0; i < docLengthVec[chunkId]; i++) { OutputNZDTCount << NZDTCount[i] << "\n"; } OutputNZDTCount.close(); ofstream OutputDTIndexValue((chunkFolderName + string("/DTIndexValue.txt")).c_str(), ios::binary); for (int i = 0; i < DTLengthVec[chunkId]; i++) { OutputDTIndexValue << DTIndex[i] <<" "<<DTValue[i]<< "\n"; } OutputDTIndexValue.close(); }
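The DTChunk class above is a per-chunk staging buffer: host arrays sized for the largest chunk, device buffers allocated once, and per-chunk copy/compute/write-back calls. A hypothetical driver loop is sketched below; the prefix, the sizes, and the "sampling kernels" step are assumptions and not taken from the source, only the member functions shown above are used.

// Hypothetical usage sketch of DTChunk (illustrative only).
#include <string>
#include "DT.cuh"

int runDT(const std::string& prefix, int maxDTLen, int maxDocLen, int numChunks) {
    DTChunk dt(maxDTLen, maxDocLen, numChunks);
    dt.loadDocDTLength(prefix);       // per-chunk DT/doc lengths
    dt.loadDTCountOffset(prefix);     // per-chunk count/offset tables
    dt.CPUMemSet();
    dt.GPUMemAllocate();
    for (int c = 0; c < numChunks; ++c) {
        dt.GPUMemSet(c);              // clear device buffers for this chunk
        dt.CPU2GPUDTCountOffset(c);   // push this chunk's count/offset tables
        // ... launch the (not shown) sampling kernels that fill
        //     deviceNZDTCount / deviceDTIndex / deviceDTValue ...
        dt.GPU2CPU(c);                // pull results back to the host arrays
        dt.CPU2Disk(prefix, c);       // persist NZDTCount and DTIndex/DTValue
    }
    return 0;
}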
7f77adf2063b1a2d8454d9a79f1087e4f16167c0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // // Copyright (c) 2010, Paul Furgale, Chi Hay Tong // // The original code was written by Paul Furgale and Chi Hay Tong // and later optimized and prepared for integration into OpenCV by Itseez. // //M*/ #include "internal_shared.hpp" #include "opencv2/gpu/device/limits.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" #include "opencv2/gpu/device/utility.hpp" #include "opencv2/gpu/device/functional.hpp" #include "opencv2/gpu/device/filters.hpp" namespace cv { namespace gpu { namespace device { namespace surf { //////////////////////////////////////////////////////////////////////// // Global parameters // The maximum number of features (before subpixel interpolation) that memory is reserved for. __constant__ int c_max_candidates; // The maximum number of features that memory is reserved for. __constant__ int c_max_features; // The image size. __constant__ int c_img_rows; __constant__ int c_img_cols; // The number of layers. __constant__ int c_nOctaveLayers; // The hessian threshold. __constant__ float c_hessianThreshold; // The current octave. __constant__ int c_octave; // The current layer size. 
__constant__ int c_layer_rows; __constant__ int c_layer_cols; void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold) { cudaSafeCall( hipMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) ); cudaSafeCall( hipMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) ); cudaSafeCall( hipMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) ); cudaSafeCall( hipMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) ); cudaSafeCall( hipMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) ); cudaSafeCall( hipMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) ); } void loadOctaveConstants(int octave, int layer_rows, int layer_cols) { cudaSafeCall( hipMemcpyToSymbol(c_octave, &octave, sizeof(octave)) ); cudaSafeCall( hipMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(layer_rows)) ); cudaSafeCall( hipMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(layer_cols)) ); } //////////////////////////////////////////////////////////////////////// // Integral image texture texture<unsigned char, 2, hipReadModeElementType> imgTex(0, hipFilterModePoint, hipAddressModeClamp); texture<unsigned int, 2, hipReadModeElementType> sumTex(0, hipFilterModePoint, hipAddressModeClamp); texture<unsigned int, 2, hipReadModeElementType> maskSumTex(0, hipFilterModePoint, hipAddressModeClamp); void bindImgTex(DevMem2Db img) { bindTexture(&imgTex, img); } void bindSumTex(DevMem2D_<uint> sum) { bindTexture(&sumTex, sum); } void bindMaskSumTex(DevMem2D_<uint> maskSum) { bindTexture(&maskSumTex, maskSum); } template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x) { #if __CUDA_ARCH__ >= 200 typedef double real_t; #else typedef float real_t; #endif float ratio = (float)newSize / oldSize; real_t d = 0; #pragma unroll for (int k = 0; k < N; ++k) { int dx1 = __float2int_rn(ratio * src[k][0]); int dy1 = __float2int_rn(ratio * src[k][1]); int dx2 = __float2int_rn(ratio * src[k][2]); int dy2 = __float2int_rn(ratio * src[k][3]); real_t t = 0; t += tex2D(sumTex, x + dx1, y + dy1); t -= tex2D(sumTex, x + dx1, y + dy2); t -= tex2D(sumTex, x + dx2, y + dy1); t += tex2D(sumTex, x + dx2, y + dy2); d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1)); } return (float)d; } //////////////////////////////////////////////////////////////////////// // Hessian __constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} }; __constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} }; __constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} }; __host__ __device__ __forceinline__ int calcSize(int octave, int layer) { /* Wavelet size at first layer of first octave. */ const int HAAR_SIZE0 = 9; /* Wavelet size increment between layers. This should be an even number, such that the wavelet sizes in an octave are either all even or all odd. This ensures that when looking for the neighbours of a sample, the layers above and below are aligned correctly. 
*/ const int HAAR_SIZE_INC = 6; return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave; } __global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace) { // Determine the indices const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2); const int blockIdx_y = blockIdx.y % gridDim_y; const int blockIdx_z = blockIdx.y / gridDim_y; const int j = threadIdx.x + blockIdx.x * blockDim.x; const int i = threadIdx.y + blockIdx_y * blockDim.y; const int layer = blockIdx_z; const int size = calcSize(c_octave, layer); const int samples_i = 1 + ((c_img_rows - size) >> c_octave); const int samples_j = 1 + ((c_img_cols - size) >> c_octave); // Ignore pixels where some of the kernel is outside the image const int margin = (size >> 1) >> c_octave; if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j) { const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, i << c_octave, j << c_octave); const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, i << c_octave, j << c_octave); const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, i << c_octave, j << c_octave); det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy; trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy; } } void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols, int octave, int nOctaveLayers) { const int min_size = calcSize(octave, 0); const int max_samples_i = 1 + ((img_rows - min_size) >> octave); const int max_samples_j = 1 + ((img_cols - min_size) >> octave); dim3 threads(16, 16); dim3 grid; grid.x = divUp(max_samples_j, threads.x); grid.y = divUp(max_samples_i, threads.y) * (nOctaveLayers + 2); hipLaunchKernelGGL(( icvCalcLayerDetAndTrace), dim3(grid), dim3(threads), 0, 0, det, trace); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // NONMAX __constant__ float c_DM[5] = {0, 0, 9, 9, 1}; struct WithMask { static __device__ bool check(int sum_i, int sum_j, int size) { float ratio = (float)size / 9.0f; float d = 0; int dx1 = __float2int_rn(ratio * c_DM[0]); int dy1 = __float2int_rn(ratio * c_DM[1]); int dx2 = __float2int_rn(ratio * c_DM[2]); int dy2 = __float2int_rn(ratio * c_DM[3]); float t = 0; t += tex2D(maskSumTex, sum_j + dx1, sum_i + dy1); t -= tex2D(maskSumTex, sum_j + dx1, sum_i + dy2); t -= tex2D(maskSumTex, sum_j + dx2, sum_i + dy1); t += tex2D(maskSumTex, sum_j + dx2, sum_i + dy2); d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1)); return (d >= 0.5f); } }; template <typename Mask> __global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer, unsigned int* maxCounter) { #if __CUDA_ARCH__ >= 110 extern __shared__ float N9[]; // The hidx variables are the indices to the hessian buffer. const int gridDim_y = gridDim.y / c_nOctaveLayers; const int blockIdx_y = blockIdx.y % gridDim_y; const int blockIdx_z = blockIdx.y / gridDim_y; const int layer = blockIdx_z + 1; const int size = calcSize(c_octave, layer); // Ignore pixels without a 3x3x3 neighbourhood in the layer above const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1; const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1; const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1; // Is this thread within the hessian buffer? 
const int zoff = blockDim.x * blockDim.y; const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff; N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; N9[localLin ] = det.ptr(c_layer_rows * (layer ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; __syncthreads(); if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1) { float val0 = N9[localLin]; if (val0 > c_hessianThreshold) { // Coordinates for the start of the wavelet in the sum image. There // is some integer division involved, so don't try to simplify this // (cancel out sampleStep) without checking the result is the same const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave; const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave; if (Mask::check(sum_i, sum_j, size)) { // Check to see if we have a max (in its 26 neighbours) const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff] && val0 > N9[localLin - blockDim.x - zoff] && val0 > N9[localLin + 1 - blockDim.x - zoff] && val0 > N9[localLin - 1 - zoff] && val0 > N9[localLin - zoff] && val0 > N9[localLin + 1 - zoff] && val0 > N9[localLin - 1 + blockDim.x - zoff] && val0 > N9[localLin + blockDim.x - zoff] && val0 > N9[localLin + 1 + blockDim.x - zoff] && val0 > N9[localLin - 1 - blockDim.x] && val0 > N9[localLin - blockDim.x] && val0 > N9[localLin + 1 - blockDim.x] && val0 > N9[localLin - 1 ] && val0 > N9[localLin + 1 ] && val0 > N9[localLin - 1 + blockDim.x] && val0 > N9[localLin + blockDim.x] && val0 > N9[localLin + 1 + blockDim.x] && val0 > N9[localLin - 1 - blockDim.x + zoff] && val0 > N9[localLin - blockDim.x + zoff] && val0 > N9[localLin + 1 - blockDim.x + zoff] && val0 > N9[localLin - 1 + zoff] && val0 > N9[localLin + zoff] && val0 > N9[localLin + 1 + zoff] && val0 > N9[localLin - 1 + blockDim.x + zoff] && val0 > N9[localLin + blockDim.x + zoff] && val0 > N9[localLin + 1 + blockDim.x + zoff] ; if(condmax) { unsigned int ind = atomicInc(maxCounter,(unsigned int) -1); if (ind < c_max_candidates) { const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]); maxPosBuffer[ind] = make_int4(j, i, layer, laplacian); } } } } } #endif } void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter, int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers) { const int layer_rows = img_rows >> octave; const int layer_cols = img_cols >> octave; const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1; dim3 threads(16, 16); dim3 grid; grid.x = divUp(layer_cols - 2 * min_margin, threads.x - 2); grid.y = divUp(layer_rows - 2 * min_margin, threads.y - 2) * nOctaveLayers; const size_t smem_size = threads.x * threads.y * 3 * sizeof(float); if (use_mask) hipLaunchKernelGGL(( icvFindMaximaInLayer<WithMask>), dim3(grid), dim3(threads), smem_size, 0, det, trace, maxPosBuffer, maxCounter); else hipLaunchKernelGGL(( icvFindMaximaInLayer<WithOutMask>), dim3(grid), dim3(threads), smem_size, 0, det, trace, maxPosBuffer, maxCounter); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // INTERPOLATION __global__ void 
icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer, float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian, unsigned int* featureCounter) { #if __CUDA_ARCH__ >= 110 const int4 maxPos = maxPosBuffer[blockIdx.x]; const int j = maxPos.x - 1 + threadIdx.x; const int i = maxPos.y - 1 + threadIdx.y; const int layer = maxPos.z - 1 + threadIdx.z; __shared__ float N9[3][3][3]; N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j]; __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) { __shared__ float dD[3]; //dx dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]); //dy dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]); //ds dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]); __shared__ float H[3][3]; //dxx H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2]; //dxy H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]); //dxs H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]); //dyx = dxy H[1][0] = H[0][1]; //dyy H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1]; //dys H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]); //dsx = dxs H[2][0] = H[0][2]; //dsy = dys H[2][1] = H[1][2]; //dss H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1]; __shared__ float x[3]; if (solve3x3(H, dD, x)) { if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f) { // if the step is within the interpolation region, perform it const int size = calcSize(c_octave, maxPos.z); const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave; const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave; const float center_i = sum_i + (float)(size - 1) / 2; const float center_j = sum_j + (float)(size - 1) / 2; const float px = center_j + x[0] * (1 << c_octave); const float py = center_i + x[1] * (1 << c_octave); const int ds = size - calcSize(c_octave, maxPos.z - 1); const float psize = roundf(size + x[2] * ds); /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = psize * 1.2f / 9.0f; /* To find the dominant orientation, the gradients in x and y are sampled in a circle of radius 6s using wavelets of size 4s. We ensure the gradient wavelet size is even to ensure the wavelet pattern is balanced and symmetric around its center */ const int grad_wav_size = 2 * __float2int_rn(2.0f * s); // check when grad_wav_size is too big if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size) { // Get a new feature index. unsigned int ind = atomicInc(featureCounter, (unsigned int)-1); if (ind < c_max_features) { featureX[ind] = px; featureY[ind] = py; featureLaplacian[ind] = maxPos.w; featureOctave[ind] = c_octave; featureSize[ind] = psize; featureHessian[ind] = N9[1][1][1]; } } // grad_wav_size check } // If the subpixel interpolation worked } } // If this is thread 0. 
#endif } void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter, float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian, unsigned int* featureCounter) { dim3 threads; threads.x = 3; threads.y = 3; threads.z = 3; dim3 grid; grid.x = maxCounter; hipLaunchKernelGGL(( icvInterpolateKeypoint), dim3(grid), dim3(threads), 0, 0, det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // Orientation #define ORI_SEARCH_INC 5 #define ORI_WIN 60 #define ORI_SAMPLES 113 __constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6}; __constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0}; __constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 
0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f}; __constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}}; __constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}}; __global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir) { __shared__ float s_X[128]; __shared__ float s_Y[128]; __shared__ float s_angle[128]; __shared__ float s_sumx[32 * 4]; __shared__ float s_sumy[32 * 4]; /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = featureSize[blockIdx.x] * 1.2f / 9.0f; /* To find the dominant orientation, the gradients in x and y are sampled in a circle of radius 6s using wavelets of size 4s. We ensure the gradient wavelet size is even to ensure the wavelet pattern is balanced and symmetric around its center */ const int grad_wav_size = 2 * __float2int_rn(2.0f * s); // check when grad_wav_size is too big if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size) return; // Calc X, Y, angle and store it to shared memory const int tid = threadIdx.y * blockDim.x + threadIdx.x; float X = 0.0f, Y = 0.0f, angle = 0.0f; if (tid < ORI_SAMPLES) { const float margin = (float)(grad_wav_size - 1) / 2.0f; const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin); const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin); if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size && x >= 0 && x < (c_img_cols + 1) - grad_wav_size) { X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x); Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x); angle = atan2f(Y, X); if (angle < 0) angle += 2.0f * CV_PI_F; angle *= 180.0f / CV_PI_F; } } s_X[tid] = X; s_Y[tid] = Y; s_angle[tid] = angle; __syncthreads(); float bestx = 0, besty = 0, best_mod = 0; #pragma unroll for (int i = 0; i < 18; ++i) { const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC; float sumx = 0.0f, sumy = 0.0f; int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx = s_X[threadIdx.x]; sumy = s_Y[threadIdx.x]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 32]; sumy += s_Y[threadIdx.x + 32]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 64]; sumy += s_Y[threadIdx.x + 64]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 96]; sumy += s_Y[threadIdx.x + 96]; } device::reduce<32>(s_sumx + threadIdx.y * 32, sumx, threadIdx.x, plus<volatile float>()); device::reduce<32>(s_sumy + threadIdx.y * 32, sumy, threadIdx.x, plus<volatile float>()); const float 
temp_mod = sumx * sumx + sumy * sumy; if (temp_mod > best_mod) { best_mod = temp_mod; bestx = sumx; besty = sumy; } __syncthreads(); } if (threadIdx.x == 0) { s_X[threadIdx.y] = bestx; s_Y[threadIdx.y] = besty; s_angle[threadIdx.y] = best_mod; } __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0) { int bestIdx = 0; if (s_angle[1] > s_angle[bestIdx]) bestIdx = 1; if (s_angle[2] > s_angle[bestIdx]) bestIdx = 2; if (s_angle[3] > s_angle[bestIdx]) bestIdx = 3; float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]); if (kp_dir < 0) kp_dir += 2.0f * CV_PI_F; kp_dir *= 180.0f / CV_PI_F; featureDir[blockIdx.x] = kp_dir; } } #undef ORI_SEARCH_INC #undef ORI_WIN #undef ORI_SAMPLES void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures) { dim3 threads; threads.x = 32; threads.y = 4; dim3 grid; grid.x = nFeatures; hipLaunchKernelGGL(( icvCalcOrientation), dim3(grid), dim3(threads), 0, 0, featureX, featureY, featureSize, featureDir); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // Descriptors #define PATCH_SZ 20 __constant__ float c_DW[PATCH_SZ * PATCH_SZ] = { 3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f, 8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f, 1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f, 3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 
0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 
0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f, 0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f, 9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f, 5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f, 3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 
8.444558261544444e-006f, 3.695352233989979e-006f }; struct WinReader { typedef uchar elem_type; __device__ __forceinline__ WinReader(float centerX_, float centerY_, float win_offset_, float cos_dir_, float sin_dir_) : centerX(centerX_), centerY(centerY_), win_offset(win_offset_), cos_dir(cos_dir_), sin_dir(sin_dir_) { } __device__ __forceinline__ uchar operator ()(int i, int j) const { float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir; float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir; return tex2D(imgTex, pixel_x, pixel_y); } float centerX; float centerY; float win_offset; float cos_dir; float sin_dir; }; __device__ void calc_dx_dy(float s_dx_bin[25], float s_dy_bin[25], const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { __shared__ float s_PATCH[6][6]; const float centerX = featureX[blockIdx.x]; const float centerY = featureY[blockIdx.x]; const float size = featureSize[blockIdx.x]; const float descriptor_dir = featureDir[blockIdx.x] * (float)(CV_PI_F / 180.0f); /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = size * 1.2f / 9.0f; /* Extract a window of pixels around the keypoint of size 20s */ const int win_size = (int)((PATCH_SZ + 1) * s); float sin_dir; float cos_dir; sincosf(descriptor_dir, &sin_dir, &cos_dir); /* Nearest neighbour version (faster) */ const float win_offset = -(float)(win_size - 1) / 2; // Compute sampling points // since grids are 2D, need to compute xBlock and yBlock indices const int xBlock = (blockIdx.y & 3); // blockIdx.y % 4 const int yBlock = (blockIdx.y >> 2); // floor(blockIdx.y/4) const int xIndex = xBlock * 5 + threadIdx.x; const int yIndex = yBlock * 5 + threadIdx.y; const float icoo = ((float)yIndex / (PATCH_SZ + 1)) * win_size; const float jcoo = ((float)xIndex / (PATCH_SZ + 1)) * win_size; LinearFilter<WinReader> filter(WinReader(centerX, centerY, win_offset, cos_dir, sin_dir)); s_PATCH[threadIdx.y][threadIdx.x] = filter(icoo, jcoo); __syncthreads(); if (threadIdx.x < 5 && threadIdx.y < 5) { const int tid = threadIdx.y * 5 + threadIdx.x; const float dw = c_DW[yIndex * PATCH_SZ + xIndex]; const float vx = (s_PATCH[threadIdx.y ][threadIdx.x + 1] - s_PATCH[threadIdx.y][threadIdx.x] + s_PATCH[threadIdx.y + 1][threadIdx.x + 1] - s_PATCH[threadIdx.y + 1][threadIdx.x ]) * dw; const float vy = (s_PATCH[threadIdx.y + 1][threadIdx.x ] - s_PATCH[threadIdx.y][threadIdx.x] + s_PATCH[threadIdx.y + 1][threadIdx.x + 1] - s_PATCH[threadIdx.y ][threadIdx.x + 1]) * dw; s_dx_bin[tid] = vx; s_dy_bin[tid] = vy; } } __device__ void reduce_sum25(volatile float* sdata1, volatile float* sdata2, volatile float* sdata3, volatile float* sdata4, int tid) { // first step is to reduce from 25 to 16 if (tid < 9) // use 9 threads { sdata1[tid] += sdata1[tid + 16]; sdata2[tid] += sdata2[tid + 16]; sdata3[tid] += sdata3[tid + 16]; sdata4[tid] += sdata4[tid + 16]; } // sum (reduce) from 16 to 1 (unrolled - aligned to a half-warp) if (tid < 8) { sdata1[tid] += sdata1[tid + 8]; sdata1[tid] += sdata1[tid + 4]; sdata1[tid] += sdata1[tid + 2]; sdata1[tid] += sdata1[tid + 1]; sdata2[tid] += sdata2[tid + 8]; sdata2[tid] += sdata2[tid + 4]; sdata2[tid] += sdata2[tid + 2]; sdata2[tid] += sdata2[tid + 1]; sdata3[tid] += sdata3[tid + 8]; sdata3[tid] += sdata3[tid + 4]; sdata3[tid] += sdata3[tid + 2]; sdata3[tid] += sdata3[tid + 1]; sdata4[tid] += sdata4[tid + 8]; sdata4[tid] += 
sdata4[tid + 4]; sdata4[tid] += sdata4[tid + 2]; sdata4[tid] += sdata4[tid + 1]; } } __global__ void compute_descriptors64(PtrStepf descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { // 2 floats (dx,dy) for each thread (5x5 sample points in each sub-region) __shared__ float sdx[25]; __shared__ float sdy[25]; __shared__ float sdxabs[25]; __shared__ float sdyabs[25]; calc_dx_dy(sdx, sdy, featureX, featureY, featureSize, featureDir); __syncthreads(); const int tid = threadIdx.y * blockDim.x + threadIdx.x; if (tid < 25) { sdxabs[tid] = ::fabs(sdx[tid]); // |dx| array sdyabs[tid] = ::fabs(sdy[tid]); // |dy| array __syncthreads(); reduce_sum25(sdx, sdy, sdxabs, sdyabs, tid); __syncthreads(); float* descriptors_block = descriptors.ptr(blockIdx.x) + (blockIdx.y << 2); // write dx, dy, |dx|, |dy| if (tid == 0) { descriptors_block[0] = sdx[0]; descriptors_block[1] = sdy[0]; descriptors_block[2] = sdxabs[0]; descriptors_block[3] = sdyabs[0]; } } } __global__ void compute_descriptors128(PtrStepf descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { // 2 floats (dx,dy) for each thread (5x5 sample points in each sub-region) __shared__ float sdx[25]; __shared__ float sdy[25]; // sum (reduce) 5x5 area response __shared__ float sd1[25]; __shared__ float sd2[25]; __shared__ float sdabs1[25]; __shared__ float sdabs2[25]; calc_dx_dy(sdx, sdy, featureX, featureY, featureSize, featureDir); __syncthreads(); const int tid = threadIdx.y * blockDim.x + threadIdx.x; if (tid < 25) { if (sdy[tid] >= 0) { sd1[tid] = sdx[tid]; sdabs1[tid] = ::fabs(sdx[tid]); sd2[tid] = 0; sdabs2[tid] = 0; } else { sd1[tid] = 0; sdabs1[tid] = 0; sd2[tid] = sdx[tid]; sdabs2[tid] = ::fabs(sdx[tid]); } __syncthreads(); reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid); __syncthreads(); float* descriptors_block = descriptors.ptr(blockIdx.x) + (blockIdx.y << 3); // write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0) if (tid == 0) { descriptors_block[0] = sd1[0]; descriptors_block[1] = sdabs1[0]; descriptors_block[2] = sd2[0]; descriptors_block[3] = sdabs2[0]; } __syncthreads(); if (sdx[tid] >= 0) { sd1[tid] = sdy[tid]; sdabs1[tid] = ::fabs(sdy[tid]); sd2[tid] = 0; sdabs2[tid] = 0; } else { sd1[tid] = 0; sdabs1[tid] = 0; sd2[tid] = sdy[tid]; sdabs2[tid] = ::fabs(sdy[tid]); } __syncthreads(); reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid); __syncthreads(); // write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0) if (tid == 0) { descriptors_block[4] = sd1[0]; descriptors_block[5] = sdabs1[0]; descriptors_block[6] = sd2[0]; descriptors_block[7] = sdabs2[0]; } } } template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors) { // no need for thread ID float* descriptor_base = descriptors.ptr(blockIdx.x); // read in the unnormalized descriptor values (squared) __shared__ float sqDesc[BLOCK_DIM_X]; const float lookup = descriptor_base[threadIdx.x]; sqDesc[threadIdx.x] = lookup * lookup; __syncthreads(); if (BLOCK_DIM_X >= 128) { if (threadIdx.x < 64) sqDesc[threadIdx.x] += sqDesc[threadIdx.x + 64]; __syncthreads(); } // reduction to get total if (threadIdx.x < 32) { volatile float* smem = sqDesc; smem[threadIdx.x] += smem[threadIdx.x + 32]; smem[threadIdx.x] += smem[threadIdx.x + 16]; smem[threadIdx.x] += smem[threadIdx.x + 8]; smem[threadIdx.x] += smem[threadIdx.x + 4]; smem[threadIdx.x] += smem[threadIdx.x + 2]; smem[threadIdx.x] += smem[threadIdx.x + 1]; } // compute length (square root) 
__shared__ float len; if (threadIdx.x == 0) { len = sqrtf(sqDesc[0]); } __syncthreads(); // normalize and store in output descriptor_base[threadIdx.x] = lookup / len; } void compute_descriptors_gpu(const DevMem2Df& descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures) { // compute unnormalized descriptors, then normalize them - odd indexing since grid must be 2D if (descriptors.cols == 64) { hipLaunchKernelGGL(( compute_descriptors64), dim3(dim3(nFeatures, 16, 1)), dim3(dim3(6, 6, 1)), 0, 0, descriptors, featureX, featureY, featureSize, featureDir); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); hipLaunchKernelGGL(( normalize_descriptors<64>), dim3(dim3(nFeatures, 1, 1)), dim3(dim3(64, 1, 1)), 0, 0, descriptors); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } else { hipLaunchKernelGGL(( compute_descriptors128), dim3(dim3(nFeatures, 16, 1)), dim3(dim3(6, 6, 1)), 0, 0, descriptors, featureX, featureY, featureSize, featureDir); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); hipLaunchKernelGGL(( normalize_descriptors<128>), dim3(dim3(nFeatures, 1, 1)), dim3(dim3(128, 1, 1)), 0, 0, descriptors); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } } } // namespace surf }}} // namespace cv { namespace gpu { namespace device
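// A host-side sketch of how the entry points defined in the file above fit together.
// This is not part of the original source: the real invoker lives elsewhere in OpenCV's
// gpu module and is not included in this dump. The function name runSurfOctaveSketch and
// the parameter list are assumptions; the buffers (det, trace, maxPosBuffer, counters,
// feature arrays, descriptors) are assumed to be pre-allocated on the device by the caller,
// using the DevMem2D/PtrStep types from internal_shared.hpp.
void runSurfOctaveSketch(cv::gpu::DevMem2Db img,
                         cv::gpu::DevMem2D_<unsigned int> sum,
                         cv::gpu::PtrStepf det, cv::gpu::PtrStepf trace,
                         int4* maxPosBuffer, unsigned int* maxCounter,
                         float* featureX, float* featureY, int* featureLaplacian,
                         int* featureOctave, float* featureSize, float* featureHessian,
                         float* featureDir, unsigned int* featureCounter,
                         cv::gpu::DevMem2Df descriptors,
                         int img_rows, int img_cols, int nOctaves, int nOctaveLayers,
                         int maxCandidates, int maxFeatures, float hessianThreshold)
{
    using namespace cv::gpu::device::surf;

    // One-time setup: __constant__ parameters and the integral-image texture.
    loadGlobalConstants(maxCandidates, maxFeatures, img_rows, img_cols, nOctaveLayers, hessianThreshold);
    bindSumTex(sum);
    hipMemset(featureCounter, 0, sizeof(unsigned int));

    for (int octave = 0; octave < nOctaves; ++octave)
    {
        const int layer_rows = img_rows >> octave;
        const int layer_cols = img_cols >> octave;
        loadOctaveConstants(octave, layer_rows, layer_cols);

        // 1. Hessian determinant and trace for all layers of this octave.
        icvCalcLayerDetAndTrace_gpu(det, trace, img_rows, img_cols, octave, nOctaveLayers);

        // 2. Non-maximum suppression into maxPosBuffer (mask disabled in this sketch);
        //    maxCounter is a device-side counter and must be reset per octave.
        hipMemset(maxCounter, 0, sizeof(unsigned int));
        icvFindMaximaInLayer_gpu(det, trace, maxPosBuffer, maxCounter,
                                 img_rows, img_cols, octave, false, nOctaveLayers);

        unsigned int maxCount = 0;
        hipMemcpy(&maxCount, maxCounter, sizeof(unsigned int), hipMemcpyDeviceToHost);
        if (maxCount > (unsigned int)maxCandidates)
            maxCount = (unsigned int)maxCandidates;

        // 3. Sub-pixel / sub-scale interpolation of the surviving candidates.
        if (maxCount > 0)
            icvInterpolateKeypoint_gpu(det, maxPosBuffer, maxCount,
                                       featureX, featureY, featureLaplacian, featureOctave,
                                       featureSize, featureHessian, featureCounter);
    }

    unsigned int nFeatures = 0;
    hipMemcpy(&nFeatures, featureCounter, sizeof(unsigned int), hipMemcpyDeviceToHost);
    if (nFeatures > (unsigned int)maxFeatures)
        nFeatures = (unsigned int)maxFeatures;
    if (nFeatures == 0)
        return;

    // 4. Dominant orientation, then 64- or 128-float descriptors; the descriptor kernels
    //    sample the original image through imgTex, so bind it before the call.
    icvCalcOrientation_gpu(featureX, featureY, featureSize, featureDir, (int)nFeatures);
    bindImgTex(img);
    compute_descriptors_gpu(descriptors, featureX, featureY, featureSize, featureDir, (int)nFeatures);
}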
7f77adf2063b1a2d8454d9a79f1087e4f16167c0.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // // Copyright (c) 2010, Paul Furgale, Chi Hay Tong // // The original code was written by Paul Furgale and Chi Hay Tong // and later optimized and prepared for integration into OpenCV by Itseez. // //M*/ #include "internal_shared.hpp" #include "opencv2/gpu/device/limits.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" #include "opencv2/gpu/device/utility.hpp" #include "opencv2/gpu/device/functional.hpp" #include "opencv2/gpu/device/filters.hpp" namespace cv { namespace gpu { namespace device { namespace surf { //////////////////////////////////////////////////////////////////////// // Global parameters // The maximum number of features (before subpixel interpolation) that memory is reserved for. __constant__ int c_max_candidates; // The maximum number of features that memory is reserved for. __constant__ int c_max_features; // The image size. __constant__ int c_img_rows; __constant__ int c_img_cols; // The number of layers. __constant__ int c_nOctaveLayers; // The hessian threshold. __constant__ float c_hessianThreshold; // The current octave. __constant__ int c_octave; // The current layer size. 
__constant__ int c_layer_rows; __constant__ int c_layer_cols; void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold) { cudaSafeCall( cudaMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) ); cudaSafeCall( cudaMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) ); cudaSafeCall( cudaMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) ); cudaSafeCall( cudaMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) ); cudaSafeCall( cudaMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) ); cudaSafeCall( cudaMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) ); } void loadOctaveConstants(int octave, int layer_rows, int layer_cols) { cudaSafeCall( cudaMemcpyToSymbol(c_octave, &octave, sizeof(octave)) ); cudaSafeCall( cudaMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(layer_rows)) ); cudaSafeCall( cudaMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(layer_cols)) ); } //////////////////////////////////////////////////////////////////////// // Integral image texture texture<unsigned char, 2, cudaReadModeElementType> imgTex(0, cudaFilterModePoint, cudaAddressModeClamp); texture<unsigned int, 2, cudaReadModeElementType> sumTex(0, cudaFilterModePoint, cudaAddressModeClamp); texture<unsigned int, 2, cudaReadModeElementType> maskSumTex(0, cudaFilterModePoint, cudaAddressModeClamp); void bindImgTex(DevMem2Db img) { bindTexture(&imgTex, img); } void bindSumTex(DevMem2D_<uint> sum) { bindTexture(&sumTex, sum); } void bindMaskSumTex(DevMem2D_<uint> maskSum) { bindTexture(&maskSumTex, maskSum); } template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x) { #if __CUDA_ARCH__ >= 200 typedef double real_t; #else typedef float real_t; #endif float ratio = (float)newSize / oldSize; real_t d = 0; #pragma unroll for (int k = 0; k < N; ++k) { int dx1 = __float2int_rn(ratio * src[k][0]); int dy1 = __float2int_rn(ratio * src[k][1]); int dx2 = __float2int_rn(ratio * src[k][2]); int dy2 = __float2int_rn(ratio * src[k][3]); real_t t = 0; t += tex2D(sumTex, x + dx1, y + dy1); t -= tex2D(sumTex, x + dx1, y + dy2); t -= tex2D(sumTex, x + dx2, y + dy1); t += tex2D(sumTex, x + dx2, y + dy2); d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1)); } return (float)d; } //////////////////////////////////////////////////////////////////////// // Hessian __constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} }; __constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} }; __constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} }; __host__ __device__ __forceinline__ int calcSize(int octave, int layer) { /* Wavelet size at first layer of first octave. */ const int HAAR_SIZE0 = 9; /* Wavelet size increment between layers. This should be an even number, such that the wavelet sizes in an octave are either all even or all odd. This ensures that when looking for the neighbours of a sample, the layers above and below are aligned correctly. 
*/ const int HAAR_SIZE_INC = 6; return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave; } __global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace) { // Determine the indices const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2); const int blockIdx_y = blockIdx.y % gridDim_y; const int blockIdx_z = blockIdx.y / gridDim_y; const int j = threadIdx.x + blockIdx.x * blockDim.x; const int i = threadIdx.y + blockIdx_y * blockDim.y; const int layer = blockIdx_z; const int size = calcSize(c_octave, layer); const int samples_i = 1 + ((c_img_rows - size) >> c_octave); const int samples_j = 1 + ((c_img_cols - size) >> c_octave); // Ignore pixels where some of the kernel is outside the image const int margin = (size >> 1) >> c_octave; if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j) { const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, i << c_octave, j << c_octave); const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, i << c_octave, j << c_octave); const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, i << c_octave, j << c_octave); det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy; trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy; } } void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols, int octave, int nOctaveLayers) { const int min_size = calcSize(octave, 0); const int max_samples_i = 1 + ((img_rows - min_size) >> octave); const int max_samples_j = 1 + ((img_cols - min_size) >> octave); dim3 threads(16, 16); dim3 grid; grid.x = divUp(max_samples_j, threads.x); grid.y = divUp(max_samples_i, threads.y) * (nOctaveLayers + 2); icvCalcLayerDetAndTrace<<<grid, threads>>>(det, trace); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // NONMAX __constant__ float c_DM[5] = {0, 0, 9, 9, 1}; struct WithMask { static __device__ bool check(int sum_i, int sum_j, int size) { float ratio = (float)size / 9.0f; float d = 0; int dx1 = __float2int_rn(ratio * c_DM[0]); int dy1 = __float2int_rn(ratio * c_DM[1]); int dx2 = __float2int_rn(ratio * c_DM[2]); int dy2 = __float2int_rn(ratio * c_DM[3]); float t = 0; t += tex2D(maskSumTex, sum_j + dx1, sum_i + dy1); t -= tex2D(maskSumTex, sum_j + dx1, sum_i + dy2); t -= tex2D(maskSumTex, sum_j + dx2, sum_i + dy1); t += tex2D(maskSumTex, sum_j + dx2, sum_i + dy2); d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1)); return (d >= 0.5f); } }; template <typename Mask> __global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer, unsigned int* maxCounter) { #if __CUDA_ARCH__ >= 110 extern __shared__ float N9[]; // The hidx variables are the indices to the hessian buffer. const int gridDim_y = gridDim.y / c_nOctaveLayers; const int blockIdx_y = blockIdx.y % gridDim_y; const int blockIdx_z = blockIdx.y / gridDim_y; const int layer = blockIdx_z + 1; const int size = calcSize(c_octave, layer); // Ignore pixels without a 3x3x3 neighbourhood in the layer above const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1; const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1; const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1; // Is this thread within the hessian buffer? 
const int zoff = blockDim.x * blockDim.y; const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff; N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; N9[localLin ] = det.ptr(c_layer_rows * (layer ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; __syncthreads(); if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1) { float val0 = N9[localLin]; if (val0 > c_hessianThreshold) { // Coordinates for the start of the wavelet in the sum image. There // is some integer division involved, so don't try to simplify this // (cancel out sampleStep) without checking the result is the same const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave; const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave; if (Mask::check(sum_i, sum_j, size)) { // Check to see if we have a max (in its 26 neighbours) const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff] && val0 > N9[localLin - blockDim.x - zoff] && val0 > N9[localLin + 1 - blockDim.x - zoff] && val0 > N9[localLin - 1 - zoff] && val0 > N9[localLin - zoff] && val0 > N9[localLin + 1 - zoff] && val0 > N9[localLin - 1 + blockDim.x - zoff] && val0 > N9[localLin + blockDim.x - zoff] && val0 > N9[localLin + 1 + blockDim.x - zoff] && val0 > N9[localLin - 1 - blockDim.x] && val0 > N9[localLin - blockDim.x] && val0 > N9[localLin + 1 - blockDim.x] && val0 > N9[localLin - 1 ] && val0 > N9[localLin + 1 ] && val0 > N9[localLin - 1 + blockDim.x] && val0 > N9[localLin + blockDim.x] && val0 > N9[localLin + 1 + blockDim.x] && val0 > N9[localLin - 1 - blockDim.x + zoff] && val0 > N9[localLin - blockDim.x + zoff] && val0 > N9[localLin + 1 - blockDim.x + zoff] && val0 > N9[localLin - 1 + zoff] && val0 > N9[localLin + zoff] && val0 > N9[localLin + 1 + zoff] && val0 > N9[localLin - 1 + blockDim.x + zoff] && val0 > N9[localLin + blockDim.x + zoff] && val0 > N9[localLin + 1 + blockDim.x + zoff] ; if(condmax) { unsigned int ind = atomicInc(maxCounter,(unsigned int) -1); if (ind < c_max_candidates) { const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]); maxPosBuffer[ind] = make_int4(j, i, layer, laplacian); } } } } } #endif } void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter, int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers) { const int layer_rows = img_rows >> octave; const int layer_cols = img_cols >> octave; const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1; dim3 threads(16, 16); dim3 grid; grid.x = divUp(layer_cols - 2 * min_margin, threads.x - 2); grid.y = divUp(layer_rows - 2 * min_margin, threads.y - 2) * nOctaveLayers; const size_t smem_size = threads.x * threads.y * 3 * sizeof(float); if (use_mask) icvFindMaximaInLayer<WithMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter); else icvFindMaximaInLayer<WithOutMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // INTERPOLATION __global__ void icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer, float* 
featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian, unsigned int* featureCounter) { #if __CUDA_ARCH__ >= 110 const int4 maxPos = maxPosBuffer[blockIdx.x]; const int j = maxPos.x - 1 + threadIdx.x; const int i = maxPos.y - 1 + threadIdx.y; const int layer = maxPos.z - 1 + threadIdx.z; __shared__ float N9[3][3][3]; N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j]; __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) { __shared__ float dD[3]; //dx dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]); //dy dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]); //ds dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]); __shared__ float H[3][3]; //dxx H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2]; //dxy H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]); //dxs H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]); //dyx = dxy H[1][0] = H[0][1]; //dyy H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1]; //dys H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]); //dsx = dxs H[2][0] = H[0][2]; //dsy = dys H[2][1] = H[1][2]; //dss H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1]; __shared__ float x[3]; if (solve3x3(H, dD, x)) { if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f) { // if the step is within the interpolation region, perform it const int size = calcSize(c_octave, maxPos.z); const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave; const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave; const float center_i = sum_i + (float)(size - 1) / 2; const float center_j = sum_j + (float)(size - 1) / 2; const float px = center_j + x[0] * (1 << c_octave); const float py = center_i + x[1] * (1 << c_octave); const int ds = size - calcSize(c_octave, maxPos.z - 1); const float psize = roundf(size + x[2] * ds); /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = psize * 1.2f / 9.0f; /* To find the dominant orientation, the gradients in x and y are sampled in a circle of radius 6s using wavelets of size 4s. We ensure the gradient wavelet size is even to ensure the wavelet pattern is balanced and symmetric around its center */ const int grad_wav_size = 2 * __float2int_rn(2.0f * s); // check when grad_wav_size is too big if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size) { // Get a new feature index. unsigned int ind = atomicInc(featureCounter, (unsigned int)-1); if (ind < c_max_features) { featureX[ind] = px; featureY[ind] = py; featureLaplacian[ind] = maxPos.w; featureOctave[ind] = c_octave; featureSize[ind] = psize; featureHessian[ind] = N9[1][1][1]; } } // grad_wav_size check } // If the subpixel interpolation worked } } // If this is thread 0. 
#endif } void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter, float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian, unsigned int* featureCounter) { dim3 threads; threads.x = 3; threads.y = 3; threads.z = 3; dim3 grid; grid.x = maxCounter; icvInterpolateKeypoint<<<grid, threads>>>(det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // Orientation #define ORI_SEARCH_INC 5 #define ORI_WIN 60 #define ORI_SAMPLES 113 __constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6}; __constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0}; __constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 
0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f}; __constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}}; __constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}}; __global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir) { __shared__ float s_X[128]; __shared__ float s_Y[128]; __shared__ float s_angle[128]; __shared__ float s_sumx[32 * 4]; __shared__ float s_sumy[32 * 4]; /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = featureSize[blockIdx.x] * 1.2f / 9.0f; /* To find the dominant orientation, the gradients in x and y are sampled in a circle of radius 6s using wavelets of size 4s. We ensure the gradient wavelet size is even to ensure the wavelet pattern is balanced and symmetric around its center */ const int grad_wav_size = 2 * __float2int_rn(2.0f * s); // check when grad_wav_size is too big if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size) return; // Calc X, Y, angle and store it to shared memory const int tid = threadIdx.y * blockDim.x + threadIdx.x; float X = 0.0f, Y = 0.0f, angle = 0.0f; if (tid < ORI_SAMPLES) { const float margin = (float)(grad_wav_size - 1) / 2.0f; const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin); const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin); if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size && x >= 0 && x < (c_img_cols + 1) - grad_wav_size) { X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x); Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x); angle = atan2f(Y, X); if (angle < 0) angle += 2.0f * CV_PI_F; angle *= 180.0f / CV_PI_F; } } s_X[tid] = X; s_Y[tid] = Y; s_angle[tid] = angle; __syncthreads(); float bestx = 0, besty = 0, best_mod = 0; #pragma unroll for (int i = 0; i < 18; ++i) { const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC; float sumx = 0.0f, sumy = 0.0f; int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx = s_X[threadIdx.x]; sumy = s_Y[threadIdx.x]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 32]; sumy += s_Y[threadIdx.x + 32]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 64]; sumy += s_Y[threadIdx.x + 64]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 96]; sumy += s_Y[threadIdx.x + 96]; } device::reduce<32>(s_sumx + threadIdx.y * 32, sumx, threadIdx.x, plus<volatile float>()); device::reduce<32>(s_sumy + threadIdx.y * 32, sumy, threadIdx.x, plus<volatile float>()); const float temp_mod = sumx * sumx + sumy * sumy; if 
(temp_mod > best_mod) { best_mod = temp_mod; bestx = sumx; besty = sumy; } __syncthreads(); } if (threadIdx.x == 0) { s_X[threadIdx.y] = bestx; s_Y[threadIdx.y] = besty; s_angle[threadIdx.y] = best_mod; } __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0) { int bestIdx = 0; if (s_angle[1] > s_angle[bestIdx]) bestIdx = 1; if (s_angle[2] > s_angle[bestIdx]) bestIdx = 2; if (s_angle[3] > s_angle[bestIdx]) bestIdx = 3; float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]); if (kp_dir < 0) kp_dir += 2.0f * CV_PI_F; kp_dir *= 180.0f / CV_PI_F; featureDir[blockIdx.x] = kp_dir; } } #undef ORI_SEARCH_INC #undef ORI_WIN #undef ORI_SAMPLES void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures) { dim3 threads; threads.x = 32; threads.y = 4; dim3 grid; grid.x = nFeatures; icvCalcOrientation<<<grid, threads>>>(featureX, featureY, featureSize, featureDir); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // Descriptors #define PATCH_SZ 20 __constant__ float c_DW[PATCH_SZ * PATCH_SZ] = { 3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f, 8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f, 1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f, 3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 
5.808438800158911e-005f, 9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 
0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f, 0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f, 9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f, 5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f, 3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f }; struct WinReader { typedef uchar elem_type; __device__ 
__forceinline__ WinReader(float centerX_, float centerY_, float win_offset_, float cos_dir_, float sin_dir_) : centerX(centerX_), centerY(centerY_), win_offset(win_offset_), cos_dir(cos_dir_), sin_dir(sin_dir_) { } __device__ __forceinline__ uchar operator ()(int i, int j) const { float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir; float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir; return tex2D(imgTex, pixel_x, pixel_y); } float centerX; float centerY; float win_offset; float cos_dir; float sin_dir; }; __device__ void calc_dx_dy(float s_dx_bin[25], float s_dy_bin[25], const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { __shared__ float s_PATCH[6][6]; const float centerX = featureX[blockIdx.x]; const float centerY = featureY[blockIdx.x]; const float size = featureSize[blockIdx.x]; const float descriptor_dir = featureDir[blockIdx.x] * (float)(CV_PI_F / 180.0f); /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = size * 1.2f / 9.0f; /* Extract a window of pixels around the keypoint of size 20s */ const int win_size = (int)((PATCH_SZ + 1) * s); float sin_dir; float cos_dir; sincosf(descriptor_dir, &sin_dir, &cos_dir); /* Nearest neighbour version (faster) */ const float win_offset = -(float)(win_size - 1) / 2; // Compute sampling points // since grids are 2D, need to compute xBlock and yBlock indices const int xBlock = (blockIdx.y & 3); // blockIdx.y % 4 const int yBlock = (blockIdx.y >> 2); // floor(blockIdx.y/4) const int xIndex = xBlock * 5 + threadIdx.x; const int yIndex = yBlock * 5 + threadIdx.y; const float icoo = ((float)yIndex / (PATCH_SZ + 1)) * win_size; const float jcoo = ((float)xIndex / (PATCH_SZ + 1)) * win_size; LinearFilter<WinReader> filter(WinReader(centerX, centerY, win_offset, cos_dir, sin_dir)); s_PATCH[threadIdx.y][threadIdx.x] = filter(icoo, jcoo); __syncthreads(); if (threadIdx.x < 5 && threadIdx.y < 5) { const int tid = threadIdx.y * 5 + threadIdx.x; const float dw = c_DW[yIndex * PATCH_SZ + xIndex]; const float vx = (s_PATCH[threadIdx.y ][threadIdx.x + 1] - s_PATCH[threadIdx.y][threadIdx.x] + s_PATCH[threadIdx.y + 1][threadIdx.x + 1] - s_PATCH[threadIdx.y + 1][threadIdx.x ]) * dw; const float vy = (s_PATCH[threadIdx.y + 1][threadIdx.x ] - s_PATCH[threadIdx.y][threadIdx.x] + s_PATCH[threadIdx.y + 1][threadIdx.x + 1] - s_PATCH[threadIdx.y ][threadIdx.x + 1]) * dw; s_dx_bin[tid] = vx; s_dy_bin[tid] = vy; } } __device__ void reduce_sum25(volatile float* sdata1, volatile float* sdata2, volatile float* sdata3, volatile float* sdata4, int tid) { // first step is to reduce from 25 to 16 if (tid < 9) // use 9 threads { sdata1[tid] += sdata1[tid + 16]; sdata2[tid] += sdata2[tid + 16]; sdata3[tid] += sdata3[tid + 16]; sdata4[tid] += sdata4[tid + 16]; } // sum (reduce) from 16 to 1 (unrolled - aligned to a half-warp) if (tid < 8) { sdata1[tid] += sdata1[tid + 8]; sdata1[tid] += sdata1[tid + 4]; sdata1[tid] += sdata1[tid + 2]; sdata1[tid] += sdata1[tid + 1]; sdata2[tid] += sdata2[tid + 8]; sdata2[tid] += sdata2[tid + 4]; sdata2[tid] += sdata2[tid + 2]; sdata2[tid] += sdata2[tid + 1]; sdata3[tid] += sdata3[tid + 8]; sdata3[tid] += sdata3[tid + 4]; sdata3[tid] += sdata3[tid + 2]; sdata3[tid] += sdata3[tid + 1]; sdata4[tid] += sdata4[tid + 8]; sdata4[tid] += sdata4[tid + 4]; sdata4[tid] += sdata4[tid + 2]; sdata4[tid] += sdata4[tid + 1]; } } __global__ void 
compute_descriptors64(PtrStepf descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { // 2 floats (dx,dy) for each thread (5x5 sample points in each sub-region) __shared__ float sdx[25]; __shared__ float sdy[25]; __shared__ float sdxabs[25]; __shared__ float sdyabs[25]; calc_dx_dy(sdx, sdy, featureX, featureY, featureSize, featureDir); __syncthreads(); const int tid = threadIdx.y * blockDim.x + threadIdx.x; if (tid < 25) { sdxabs[tid] = ::fabs(sdx[tid]); // |dx| array sdyabs[tid] = ::fabs(sdy[tid]); // |dy| array __syncthreads(); reduce_sum25(sdx, sdy, sdxabs, sdyabs, tid); __syncthreads(); float* descriptors_block = descriptors.ptr(blockIdx.x) + (blockIdx.y << 2); // write dx, dy, |dx|, |dy| if (tid == 0) { descriptors_block[0] = sdx[0]; descriptors_block[1] = sdy[0]; descriptors_block[2] = sdxabs[0]; descriptors_block[3] = sdyabs[0]; } } } __global__ void compute_descriptors128(PtrStepf descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { // 2 floats (dx,dy) for each thread (5x5 sample points in each sub-region) __shared__ float sdx[25]; __shared__ float sdy[25]; // sum (reduce) 5x5 area response __shared__ float sd1[25]; __shared__ float sd2[25]; __shared__ float sdabs1[25]; __shared__ float sdabs2[25]; calc_dx_dy(sdx, sdy, featureX, featureY, featureSize, featureDir); __syncthreads(); const int tid = threadIdx.y * blockDim.x + threadIdx.x; if (tid < 25) { if (sdy[tid] >= 0) { sd1[tid] = sdx[tid]; sdabs1[tid] = ::fabs(sdx[tid]); sd2[tid] = 0; sdabs2[tid] = 0; } else { sd1[tid] = 0; sdabs1[tid] = 0; sd2[tid] = sdx[tid]; sdabs2[tid] = ::fabs(sdx[tid]); } __syncthreads(); reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid); __syncthreads(); float* descriptors_block = descriptors.ptr(blockIdx.x) + (blockIdx.y << 3); // write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0) if (tid == 0) { descriptors_block[0] = sd1[0]; descriptors_block[1] = sdabs1[0]; descriptors_block[2] = sd2[0]; descriptors_block[3] = sdabs2[0]; } __syncthreads(); if (sdx[tid] >= 0) { sd1[tid] = sdy[tid]; sdabs1[tid] = ::fabs(sdy[tid]); sd2[tid] = 0; sdabs2[tid] = 0; } else { sd1[tid] = 0; sdabs1[tid] = 0; sd2[tid] = sdy[tid]; sdabs2[tid] = ::fabs(sdy[tid]); } __syncthreads(); reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid); __syncthreads(); // write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0) if (tid == 0) { descriptors_block[4] = sd1[0]; descriptors_block[5] = sdabs1[0]; descriptors_block[6] = sd2[0]; descriptors_block[7] = sdabs2[0]; } } } template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors) { // no need for thread ID float* descriptor_base = descriptors.ptr(blockIdx.x); // read in the unnormalized descriptor values (squared) __shared__ float sqDesc[BLOCK_DIM_X]; const float lookup = descriptor_base[threadIdx.x]; sqDesc[threadIdx.x] = lookup * lookup; __syncthreads(); if (BLOCK_DIM_X >= 128) { if (threadIdx.x < 64) sqDesc[threadIdx.x] += sqDesc[threadIdx.x + 64]; __syncthreads(); } // reduction to get total if (threadIdx.x < 32) { volatile float* smem = sqDesc; smem[threadIdx.x] += smem[threadIdx.x + 32]; smem[threadIdx.x] += smem[threadIdx.x + 16]; smem[threadIdx.x] += smem[threadIdx.x + 8]; smem[threadIdx.x] += smem[threadIdx.x + 4]; smem[threadIdx.x] += smem[threadIdx.x + 2]; smem[threadIdx.x] += smem[threadIdx.x + 1]; } // compute length (square root) __shared__ float len; if (threadIdx.x == 0) { len = sqrtf(sqDesc[0]); } __syncthreads(); // normalize and 
store in output descriptor_base[threadIdx.x] = lookup / len; } void compute_descriptors_gpu(const DevMem2Df& descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures) { // compute unnormalized descriptors, then normalize them - odd indexing since grid must be 2D if (descriptors.cols == 64) { compute_descriptors64<<<dim3(nFeatures, 16, 1), dim3(6, 6, 1)>>>(descriptors, featureX, featureY, featureSize, featureDir); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); normalize_descriptors<64><<<dim3(nFeatures, 1, 1), dim3(64, 1, 1)>>>(descriptors); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } else { compute_descriptors128<<<dim3(nFeatures, 16, 1), dim3(6, 6, 1)>>>(descriptors, featureX, featureY, featureSize, featureDir); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); normalize_descriptors<128><<<dim3(nFeatures, 1, 1), dim3(128, 1, 1)>>>(descriptors); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } } } // namespace surf }}} // namespace cv { namespace gpu { namespace device
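// The interpolation kernel above refines each detected maximum by solving the
// 3x3 system H * x = dD through solve3x3(H, dD, x), which is defined elsewhere
// in the original source file and not reproduced in this excerpt. Below is a
// minimal sketch of what such a solver can look like, using Cramer's rule and
// rejecting near-singular Hessians; the name solve3x3_sketch and the epsilon
// threshold are assumptions for illustration, not the library's actual code.
__device__ __forceinline__ bool solve3x3_sketch(float A[3][3], float b[3], float x[3])
{
    // Determinant of the 3x3 system matrix.
    const float det = A[0][0] * (A[1][1] * A[2][2] - A[1][2] * A[2][1])
                    - A[0][1] * (A[1][0] * A[2][2] - A[1][2] * A[2][0])
                    + A[0][2] * (A[1][0] * A[2][1] - A[1][1] * A[2][0]);

    if (::fabs(det) < 1e-9f)
        return false; // nearly singular: reject this candidate keypoint

    const float invdet = 1.0f / det;

    // Cramer's rule: each component replaces one column of A with b.
    x[0] = invdet * (b[0]    * (A[1][1] * A[2][2] - A[1][2] * A[2][1])
                   - A[0][1] * (b[1]    * A[2][2] - A[1][2] * b[2])
                   + A[0][2] * (b[1]    * A[2][1] - A[1][1] * b[2]));

    x[1] = invdet * (A[0][0] * (b[1]    * A[2][2] - A[1][2] * b[2])
                   - b[0]    * (A[1][0] * A[2][2] - A[1][2] * A[2][0])
                   + A[0][2] * (A[1][0] * b[2]    - b[1]    * A[2][0]));

    x[2] = invdet * (A[0][0] * (A[1][1] * b[2]    - b[1]    * A[2][1])
                   - A[0][1] * (A[1][0] * b[2]    - b[1]    * A[2][0])
                   + b[0]    * (A[1][0] * A[2][1] - A[1][1] * A[2][0]));

    return true;
}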
000ec80913ea5f64733d8b955861eda23f3659cf.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

#define TILE_DIM 1024

template<typename T>
__device__ void argmin(const T* vector, int* result, const int length)
{
    __shared__ T partsVals[TILE_DIM];
    __shared__ int partsArgs[TILE_DIM];

    int index = threadIdx.x;
    int partLength = (length + TILE_DIM - 1) / TILE_DIM;

    T min;
    int argmin;

    if (index < length) {
        min = vector[index];
        argmin = index;
    }

    for (int i = 1; i < partLength; i++) {
        int valueIndex = i * TILE_DIM + index;
        if (valueIndex < length) {
            T value = vector[valueIndex];
            if (value < min) {
                min = value;
                argmin = valueIndex;
            }
        }
    }

    partsVals[index] = min;
    partsArgs[index] = argmin;

    int limit = length < TILE_DIM ? length : TILE_DIM;
    for (int d = 1; d < limit; d <<= 1) {
        __syncthreads();
        if (index % (d << 1) == 0) {
            int valueIndex = index + d;
            if (valueIndex < limit) {
                T value = partsVals[valueIndex];
                int arg = partsArgs[valueIndex];
                if (value < min) {
                    min = value;
                    partsVals[index] = min;
                    argmin = arg;
                    partsArgs[index] = argmin;
                }
            }
        }
    }

    if (index == 0) {
        result[0] = argmin;
    }
}
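// argmin above is a __device__ helper, not a kernel: it assumes it runs inside
// a single block of TILE_DIM threads, where each thread first scans its strided
// slice of the input and the block then finishes with a shared-memory tree
// reduction. A minimal kernel entry point wrapping it could look like the
// sketch below; argminKernel is a hypothetical name, not part of the original file.
template<typename T>
__global__ void argminKernel(const T* vector, int* result, const int length)
{
    // Launch with one block of TILE_DIM threads, e.g. <<<1, TILE_DIM>>>,
    // so that the shared-memory arrays inside argmin are fully populated.
    argmin<T>(vector, result, length);
}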
000ec80913ea5f64733d8b955861eda23f3659cf.cu
#define TILE_DIM 1024

template<typename T>
__device__ void argmin(const T* vector, int* result, const int length)
{
    __shared__ T partsVals[TILE_DIM];
    __shared__ int partsArgs[TILE_DIM];

    int index = threadIdx.x;
    int partLength = (length + TILE_DIM - 1) / TILE_DIM;

    T min;
    int argmin;

    if (index < length) {
        min = vector[index];
        argmin = index;
    }

    for (int i = 1; i < partLength; i++) {
        int valueIndex = i * TILE_DIM + index;
        if (valueIndex < length) {
            T value = vector[valueIndex];
            if (value < min) {
                min = value;
                argmin = valueIndex;
            }
        }
    }

    partsVals[index] = min;
    partsArgs[index] = argmin;

    int limit = length < TILE_DIM ? length : TILE_DIM;
    for (int d = 1; d < limit; d <<= 1) {
        __syncthreads();
        if (index % (d << 1) == 0) {
            int valueIndex = index + d;
            if (valueIndex < limit) {
                T value = partsVals[valueIndex];
                int arg = partsArgs[valueIndex];
                if (value < min) {
                    min = value;
                    partsVals[index] = min;
                    argmin = arg;
                    partsArgs[index] = argmin;
                }
            }
        }
    }

    if (index == 0) {
        result[0] = argmin;
    }
}
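// A minimal host-side usage sketch for the CUDA copy of argmin above. It reuses
// the hypothetical argminKernel wrapper sketched after the HIP copy; the buffer
// names, the float specialization and the test values are illustrative only.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

template<typename T>
__global__ void argminKernel(const T* vector, int* result, const int length)
{
    argmin<T>(vector, result, length); // block-level reduction defined above
}

int main()
{
    const int length = 5000;
    std::vector<float> h_data(length, 1.0f);
    h_data[1234] = -3.5f; // known minimum for the sanity check

    float* d_data = NULL;
    int* d_arg = NULL;
    cudaMalloc((void**)&d_data, length * sizeof(float));
    cudaMalloc((void**)&d_arg, sizeof(int));
    cudaMemcpy(d_data, h_data.data(), length * sizeof(float), cudaMemcpyHostToDevice);

    // One block of TILE_DIM threads, as the shared-memory layout requires.
    argminKernel<float><<<1, TILE_DIM>>>(d_data, d_arg, length);

    int h_arg = -1;
    cudaMemcpy(&h_arg, d_arg, sizeof(int), cudaMemcpyDeviceToHost);
    std::printf("argmin = %d (expected 1234)\n", h_arg);

    cudaFree(d_data);
    cudaFree(d_arg);
    return 0;
}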
45c232f09fe6590eb5687db841694d1e9533d8f8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/replace.h> #include <thrust/functional.h> #include <thrust/sort.h> #include <iostream> #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE RD_WG_SIZE #else #define BLOCK_SIZE 16 #endif #define STR_SIZE 256 /* maximum power density possible (say 300W for a 10mm x 10mm chip) */ #define MAX_PD (3.0e6) /* required precision in degrees */ #define PRECISION 0.001 #define SPEC_HEAT_SI 1.75e6 #define K_SI 100 /* capacitance fitting factor */ #define FACTOR_CHIP 0.5 /* chip parameters */ float t_chip = 0.0005; float chip_height = 0.016; float chip_width = 0.016; /* ambient temperature, assuming no package at all */ float amb_temp = 80.0; void run(int argc, char** argv); /* define timer macros */ #define pin_stats_reset() startCycle() #define pin_stats_pause(cycles) stopCycle(cycles) #define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles) void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } void writeoutput(thrust::host_vector<float> vect, int grid_rows, int grid_cols, char *file){ int i,j, index=0; FILE *fp; char str[STR_SIZE]; if( (fp = fopen(file, "w" )) == 0 ) printf( "The file was not opened\n" ); for (i=0; i < grid_rows; i++) for (j=0; j < grid_cols; j++) { sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]); fputs(str,fp); index++; } fclose(fp); } void readinput(float * vect, int grid_rows, int grid_cols, char *file){ int i,j; FILE *fp; char str[STR_SIZE]; float val; if( (fp = fopen(file, "r" )) ==0 ) printf( "The file was not opened\n" ); for (i=0; i <= grid_rows-1; i++) for (j=0; j <= grid_cols-1; j++) { fgets(str, STR_SIZE, fp); if (feof(fp)) fatal((char *)"not enough lines in file"); //if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1))) if ((sscanf(str, "%f", &val) != 1)) fatal((char *)"invalid file format"); vect[i*grid_cols+j] = val; } fclose(fp); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? (a) : (b)) __global__ void calculate_temp(int iteration, //number of iteration float *power, //power input float *temp_src, //temperature input/output float *temp_dst, //temperature input/output int grid_cols, //Col of grid int grid_rows, //Row of grid int border_cols, // border offset int border_rows, // border offset float Cap, //Capacitance float Rx, float Ry, float Rz, float step, float time_elapsed){ __shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result float amb_temp = 80.0; float step_div_Cap; float Rx_1,Ry_1,Rz_1; int bx = blockIdx.x; int by = blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; step_div_Cap=step/Cap; Rx_1=1/Rx; Ry_1=1/Ry; Rz_1=1/Rz; // each block finally computes result for a small block // after N iterations. 
// it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE // calculate the boundary for the block according to // the boundary of its small block int blkY = small_block_rows*by-border_rows; int blkX = small_block_cols*bx-border_cols; int blkYmax = blkY+BLOCK_SIZE-1; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int yidx = blkY+ty; int xidx = blkX+tx; // load data if it is within the valid input range int loadYidx=yidx, loadXidx=xidx; int index = grid_cols*loadYidx+loadXidx; if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){ temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory } __syncthreads(); // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validYmin = (blkY < 0) ? -blkY : 0; int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1; int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1; int N = ty-1; int S = ty+1; int W = tx-1; int E = tx+1; N = (N < validYmin) ? validYmin : N; S = (S > validYmax) ? validYmax : S; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(tx, validXmin, validXmax) && \ IN_RANGE(ty, validYmin, validYmax) ) { computed = true; temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] + (temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 + (temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 + (amb_temp - temp_on_cuda[ty][tx]) * Rz_1); } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range temp_on_cuda[ty][tx]= temp_t[ty][tx]; __syncthreads(); } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ temp_dst[index]= temp_t[ty][tx]; } } /* compute N time steps */ int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \ int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blockCols, blockRows); printf("Number of Threads = %d \n",BLOCK_SIZE*BLOCK_SIZE*blockCols*blockRows); float grid_height = chip_height / row; float grid_width = chip_width / col; float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height; float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height); float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width); float Rz = t_chip / (K_SI * grid_height * grid_width); float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI); float step = PRECISION / max_slope; float t; float time_elapsed; time_elapsed=0.001; int src = 1, dst = 0; for (t = 0; t < total_iterations; t+=num_iterations) { int temp = src; src = dst; dst = temp; hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(num_iterations, 
total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\ col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed); } return dst; } class hotspotFunctor { public: __device__ float operator() (float x,int y) { return y; } }; int thrustCompute(thrust::device_vector<float> MatrixPower,thrust::device_vector<float> MatrixTemp[2], int col, int row, \ int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blockCols, blockRows); float grid_height = chip_height / row; float grid_width = chip_width / col; float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height; float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height); float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width); float Rz = t_chip / (K_SI * grid_height * grid_width); float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI); float step = PRECISION / max_slope; float t; float time_elapsed; time_elapsed=0.001; int src = 1, dst = 0; for (t = 0; t < total_iterations; t+=num_iterations) { int temp = src; src = dst; dst = temp; int requiredIterations = MIN(num_iterations,total_iterations-t); int small_block_rows = BLOCK_SIZE-requiredIterations*2;//EXPAND_RATE int small_block_cols = BLOCK_SIZE-requiredIterations*2;//EXPAND_RATE float amb_temp = 80.0; float step_div_Cap; float Rx_1,Ry_1,Rz_1; //Generate id using counting iterator // int bx = blockIdx.x; // int by = blockIdx.y; // // int tx=threadIdx.x; // int ty=threadIdx.y; step_div_Cap=step/Cap; Rx_1=1/Rx; Ry_1=1/Ry; Rz_1=1/Rz; // hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\ col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed); } return dst; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]); fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n"); fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n"); fprintf(stderr, "\t<sim_time> - number of iterations\n"); fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n"); fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n"); fprintf(stderr, "\t<output_file> - name of the output file\n"); exit(1); } int main(int argc, char** argv) { printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); run(argc,argv); return EXIT_SUCCESS; } void run(int argc, char** argv) { int size; int grid_rows,grid_cols; float *FilesavingTemp,*FilesavingPower,*MatrixOut; char *tfile, *pfile, *ofile; int total_iterations = 60; int pyramid_height = 1; // number of iterations if (argc != 7) usage(argc, argv); if((grid_rows = atoi(argv[1]))<=0|| (grid_cols = atoi(argv[1]))<=0|| (pyramid_height = atoi(argv[2]))<=0|| (total_iterations = atoi(argv[3]))<=0) usage(argc, argv); tfile=argv[4]; pfile=argv[5]; ofile=argv[6]; size=grid_rows*grid_cols; /* --------------- pyramid parameters --------------- */ # define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline int borderCols = (pyramid_height)*EXPAND_RATE/2; int borderRows = (pyramid_height)*EXPAND_RATE/2; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int 
blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1); int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1); FilesavingTemp = (float *) malloc(size*sizeof(float)); FilesavingPower = (float *) malloc(size*sizeof(float)); MatrixOut = (float *) calloc (size, sizeof(float)); if( !FilesavingPower || !FilesavingTemp || !MatrixOut) fatal((char *)"unable to allocate memory"); printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\ pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow); readinput(FilesavingTemp, grid_rows, grid_cols, tfile); readinput(FilesavingPower, grid_rows, grid_cols, pfile); float *MatrixTemp[2], *MatrixPower; hipMalloc((void**)&MatrixTemp[0], sizeof(float)*size); hipMalloc((void**)&MatrixTemp[1], sizeof(float)*size); thrust::device_vector<float> MatrixTemperature[2]= { thrust::device_vector<float> (size), thrust::device_vector<float> (size), }; hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, hipMemcpyHostToDevice); MatrixTemperature[0].assign(FilesavingTemp,FilesavingTemp+size); hipMalloc((void**)&MatrixPower, sizeof(float)*size); thrust::host_vector<float>HostMatrixPowerVector (FilesavingPower,FilesavingPower+size); thrust::device_vector<float>DeviceMatrixPowerVector = HostMatrixPowerVector; hipMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, hipMemcpyHostToDevice); printf("Start computing the transient temperature\n"); int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \ total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows); printf("Ending simulation\n"); hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, hipMemcpyDeviceToHost); thrust::host_vector<float> MatrixOutput(size); thrust::device_vector<float> resultMatrix(size); resultMatrix.assign(MatrixTemp[ret],MatrixTemp[ret]+size); thrust::device_vector<float> bobby (size); thrust::counting_iterator<int> myCount(0); thrust::transform(resultMatrix.begin(),resultMatrix.end(),myCount,bobby.begin(),hotspotFunctor()); MatrixOutput=bobby; writeoutput(MatrixOutput,grid_rows, grid_cols, ofile); // hipFree(MatrixPower); hipFree(MatrixTemp[0]); hipFree(MatrixTemp[1]); free(MatrixOut); }
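// The pyramid parameters computed in run() above determine how much halo each
// thread block must carry so that `pyramid_height` stencil iterations can be
// fused into a single calculate_temp launch. The helper below restates that
// arithmetic in isolation; the struct and function names are illustrative, and
// SKETCH_EXPAND_RATE mirrors the EXPAND_RATE macro defined inside run(). With
// BLOCK_SIZE = 16 and pyramid_height = 1 this gives a 1-cell halo per side, a
// 14x14 useful interior per block, and ceil(grid_cols / 14) blocks per row.
#define SKETCH_EXPAND_RATE 2 // each fused iteration grows the halo by one cell per side

struct PyramidSizing {
    int border;     // halo cells per side (borderCols / borderRows)
    int smallBlock; // interior cells actually produced per block per dimension
    int blocks;     // thread blocks needed to cover `gridCells` cells
};

static PyramidSizing pyramidSizing(int blockSize, int pyramidHeight, int gridCells)
{
    PyramidSizing s;
    s.border = pyramidHeight * SKETCH_EXPAND_RATE / 2;
    s.smallBlock = blockSize - pyramidHeight * SKETCH_EXPAND_RATE;
    s.blocks = gridCells / s.smallBlock + ((gridCells % s.smallBlock == 0) ? 0 : 1);
    return s;
}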
45c232f09fe6590eb5687db841694d1e9533d8f8.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/replace.h> #include <thrust/functional.h> #include <thrust/sort.h> #include <iostream> #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE RD_WG_SIZE #else #define BLOCK_SIZE 16 #endif #define STR_SIZE 256 /* maximum power density possible (say 300W for a 10mm x 10mm chip) */ #define MAX_PD (3.0e6) /* required precision in degrees */ #define PRECISION 0.001 #define SPEC_HEAT_SI 1.75e6 #define K_SI 100 /* capacitance fitting factor */ #define FACTOR_CHIP 0.5 /* chip parameters */ float t_chip = 0.0005; float chip_height = 0.016; float chip_width = 0.016; /* ambient temperature, assuming no package at all */ float amb_temp = 80.0; void run(int argc, char** argv); /* define timer macros */ #define pin_stats_reset() startCycle() #define pin_stats_pause(cycles) stopCycle(cycles) #define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles) void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } void writeoutput(thrust::host_vector<float> vect, int grid_rows, int grid_cols, char *file){ int i,j, index=0; FILE *fp; char str[STR_SIZE]; if( (fp = fopen(file, "w" )) == 0 ) printf( "The file was not opened\n" ); for (i=0; i < grid_rows; i++) for (j=0; j < grid_cols; j++) { sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]); fputs(str,fp); index++; } fclose(fp); } void readinput(float * vect, int grid_rows, int grid_cols, char *file){ int i,j; FILE *fp; char str[STR_SIZE]; float val; if( (fp = fopen(file, "r" )) ==0 ) printf( "The file was not opened\n" ); for (i=0; i <= grid_rows-1; i++) for (j=0; j <= grid_cols-1; j++) { fgets(str, STR_SIZE, fp); if (feof(fp)) fatal((char *)"not enough lines in file"); //if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1))) if ((sscanf(str, "%f", &val) != 1)) fatal((char *)"invalid file format"); vect[i*grid_cols+j] = val; } fclose(fp); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? (a) : (b)) __global__ void calculate_temp(int iteration, //number of iteration float *power, //power input float *temp_src, //temperature input/output float *temp_dst, //temperature input/output int grid_cols, //Col of grid int grid_rows, //Row of grid int border_cols, // border offset int border_rows, // border offset float Cap, //Capacitance float Rx, float Ry, float Rz, float step, float time_elapsed){ __shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result float amb_temp = 80.0; float step_div_Cap; float Rx_1,Ry_1,Rz_1; int bx = blockIdx.x; int by = blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; step_div_Cap=step/Cap; Rx_1=1/Rx; Ry_1=1/Ry; Rz_1=1/Rz; // each block finally computes result for a small block // after N iterations. 
// it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE // calculate the boundary for the block according to // the boundary of its small block int blkY = small_block_rows*by-border_rows; int blkX = small_block_cols*bx-border_cols; int blkYmax = blkY+BLOCK_SIZE-1; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int yidx = blkY+ty; int xidx = blkX+tx; // load data if it is within the valid input range int loadYidx=yidx, loadXidx=xidx; int index = grid_cols*loadYidx+loadXidx; if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){ temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory } __syncthreads(); // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validYmin = (blkY < 0) ? -blkY : 0; int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1; int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1; int N = ty-1; int S = ty+1; int W = tx-1; int E = tx+1; N = (N < validYmin) ? validYmin : N; S = (S > validYmax) ? validYmax : S; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(tx, validXmin, validXmax) && \ IN_RANGE(ty, validYmin, validYmax) ) { computed = true; temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] + (temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 + (temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 + (amb_temp - temp_on_cuda[ty][tx]) * Rz_1); } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range temp_on_cuda[ty][tx]= temp_t[ty][tx]; __syncthreads(); } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ temp_dst[index]= temp_t[ty][tx]; } } /* compute N time steps */ int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \ int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blockCols, blockRows); printf("Number of Threads = %d \n",BLOCK_SIZE*BLOCK_SIZE*blockCols*blockRows); float grid_height = chip_height / row; float grid_width = chip_width / col; float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height; float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height); float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width); float Rz = t_chip / (K_SI * grid_height * grid_width); float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI); float step = PRECISION / max_slope; float t; float time_elapsed; time_elapsed=0.001; int src = 1, dst = 0; for (t = 0; t < total_iterations; t+=num_iterations) { int temp = src; src = dst; dst = temp; calculate_temp<<<dimGrid, dimBlock>>>(MIN(num_iterations, total_iterations-t), 
MatrixPower,MatrixTemp[src],MatrixTemp[dst],\ col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed); } return dst; } class hotspotFunctor { public: __device__ float operator() (float x,int y) { return y; } }; int thrustCompute(thrust::device_vector<float> MatrixPower,thrust::device_vector<float> MatrixTemp[2], int col, int row, \ int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blockCols, blockRows); float grid_height = chip_height / row; float grid_width = chip_width / col; float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height; float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height); float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width); float Rz = t_chip / (K_SI * grid_height * grid_width); float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI); float step = PRECISION / max_slope; float t; float time_elapsed; time_elapsed=0.001; int src = 1, dst = 0; for (t = 0; t < total_iterations; t+=num_iterations) { int temp = src; src = dst; dst = temp; int requiredIterations = MIN(num_iterations,total_iterations-t); int small_block_rows = BLOCK_SIZE-requiredIterations*2;//EXPAND_RATE int small_block_cols = BLOCK_SIZE-requiredIterations*2;//EXPAND_RATE float amb_temp = 80.0; float step_div_Cap; float Rx_1,Ry_1,Rz_1; //Generate id using counting iterator // int bx = blockIdx.x; // int by = blockIdx.y; // // int tx=threadIdx.x; // int ty=threadIdx.y; step_div_Cap=step/Cap; Rx_1=1/Rx; Ry_1=1/Ry; Rz_1=1/Rz; // calculate_temp<<<dimGrid, dimBlock>>>(MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\ col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed); } return dst; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]); fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n"); fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n"); fprintf(stderr, "\t<sim_time> - number of iterations\n"); fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n"); fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n"); fprintf(stderr, "\t<output_file> - name of the output file\n"); exit(1); } int main(int argc, char** argv) { printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); run(argc,argv); return EXIT_SUCCESS; } void run(int argc, char** argv) { int size; int grid_rows,grid_cols; float *FilesavingTemp,*FilesavingPower,*MatrixOut; char *tfile, *pfile, *ofile; int total_iterations = 60; int pyramid_height = 1; // number of iterations if (argc != 7) usage(argc, argv); if((grid_rows = atoi(argv[1]))<=0|| (grid_cols = atoi(argv[1]))<=0|| (pyramid_height = atoi(argv[2]))<=0|| (total_iterations = atoi(argv[3]))<=0) usage(argc, argv); tfile=argv[4]; pfile=argv[5]; ofile=argv[6]; size=grid_rows*grid_cols; /* --------------- pyramid parameters --------------- */ # define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline int borderCols = (pyramid_height)*EXPAND_RATE/2; int borderRows = (pyramid_height)*EXPAND_RATE/2; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int blockCols = 
grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1); int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1); FilesavingTemp = (float *) malloc(size*sizeof(float)); FilesavingPower = (float *) malloc(size*sizeof(float)); MatrixOut = (float *) calloc (size, sizeof(float)); if( !FilesavingPower || !FilesavingTemp || !MatrixOut) fatal((char *)"unable to allocate memory"); printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\ pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow); readinput(FilesavingTemp, grid_rows, grid_cols, tfile); readinput(FilesavingPower, grid_rows, grid_cols, pfile); float *MatrixTemp[2], *MatrixPower; cudaMalloc((void**)&MatrixTemp[0], sizeof(float)*size); cudaMalloc((void**)&MatrixTemp[1], sizeof(float)*size); thrust::device_vector<float> MatrixTemperature[2]= { thrust::device_vector<float> (size), thrust::device_vector<float> (size), }; cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, cudaMemcpyHostToDevice); MatrixTemperature[0].assign(FilesavingTemp,FilesavingTemp+size); cudaMalloc((void**)&MatrixPower, sizeof(float)*size); thrust::host_vector<float>HostMatrixPowerVector (FilesavingPower,FilesavingPower+size); thrust::device_vector<float>DeviceMatrixPowerVector = HostMatrixPowerVector; cudaMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, cudaMemcpyHostToDevice); printf("Start computing the transient temperature\n"); int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \ total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows); printf("Ending simulation\n"); cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, cudaMemcpyDeviceToHost); thrust::host_vector<float> MatrixOutput(size); thrust::device_vector<float> resultMatrix(size); resultMatrix.assign(MatrixTemp[ret],MatrixTemp[ret]+size); thrust::device_vector<float> bobby (size); thrust::counting_iterator<int> myCount(0); thrust::transform(resultMatrix.begin(),resultMatrix.end(),myCount,bobby.begin(),hotspotFunctor()); MatrixOutput=bobby; writeoutput(MatrixOutput,grid_rows, grid_cols, ofile); // cudaFree(MatrixPower); cudaFree(MatrixTemp[0]); cudaFree(MatrixTemp[1]); free(MatrixOut); }
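// compute_tran_temp above launches calculate_temp in a ping-pong loop (src and
// dst swap every num_iterations steps) without checking for launch or runtime
// errors. The sketch below shows one way the launches could be instrumented;
// checkCuda is a hypothetical helper, not part of the original benchmark, and
// it relies on the <stdio.h>/<stdlib.h> headers already included above.
static void checkCuda(cudaError_t err, const char* what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Usage immediately after the kernel launch inside the time-stepping loop:
//
//     calculate_temp<<<dimGrid, dimBlock>>>(MIN(num_iterations, total_iterations - t),
//                                           MatrixPower, MatrixTemp[src], MatrixTemp[dst],
//                                           col, row, borderCols, borderRows,
//                                           Cap, Rx, Ry, Rz, step, time_elapsed);
//     checkCuda(cudaGetLastError(), "calculate_temp launch");
//     checkCuda(cudaDeviceSynchronize(), "calculate_temp execution");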
131ddfb524513929d8b6cdc8f6135cce44e9a09d.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <wb.h> #include <hip/hip_runtime.h> using namespace std; #define IS_WEBCUDA #define HISTOGRAM_LENGTH 256 #define BLOCK_SIZE 512 //@@ You can change this #define wbCheck(stmt) do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ printf("Error:Failed to run stmt %s\n", #stmt); \ return -1; \ } \ } while(0) __global__ void float2uchar(float *input, unsigned char * output, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < size) { output[i] = (unsigned char) (255 * input[i]); } } __global__ void uchar2Float(unsigned char *input, float * output, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < size) { output[i] = ((float) input[i])/255; } } __global__ void ucharCorrect2Float(unsigned char *input, float * output, unsigned int * correction, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < size) { unsigned char a = correction[input[i]]; output[i] = ((float) a)/255; } } __global__ void rgb2gray(unsigned char * ucharImage, unsigned char * grayImage, uint size) { uint i = blockDim.x * blockIdx.x + threadIdx.x; uint rgbIdx = i*3; if(i < size) { uint r = ucharImage[rgbIdx]; uint g = ucharImage[rgbIdx+1]; uint b = ucharImage[rgbIdx+2]; grayImage[i]= (unsigned char) (0.21*r + 0.71*g + 0.07*b); } } void HostHistogram(unsigned char *buffer, long size, unsigned int *histo) { for(long i =0; i<HISTOGRAM_LENGTH; i++) { histo[i]=0; } for(long i =0; i<size; i++) { unsigned int bufferIndex = (unsigned int ) buffer[i]; histo[bufferIndex] += 1; } } __global__ void histo(unsigned char *buffer, long size, unsigned int *histo) { __shared__ unsigned int histo_private[HISTOGRAM_LENGTH]; if(threadIdx.x < HISTOGRAM_LENGTH) histo_private[threadIdx.x] = 0; __syncthreads(); int i = threadIdx.x + blockIdx.x * blockDim.x; // stride is total number of threads int stride = blockDim.x *gridDim.x; while ( i < size) { unsigned int bIdx = (unsigned int)buffer[i]; atomicAdd(&(histo_private[bIdx]), 1); //atomicAdd( &(histo[bIdx]), 1); i += stride; } // wait for all other threads in the block to finish __syncthreads(); if(threadIdx.x < HISTOGRAM_LENGTH) { atomicAdd( &(histo[threadIdx.x]), histo_private[threadIdx.x]); } } __global__ void applyEdgeContibutions(float *input, float *aux, int len) { unsigned int t = threadIdx.x; unsigned int start = 2 * blockIdx.x * BLOCK_SIZE; if (blockIdx.x) { if (start + t < len) input[start + t] += aux[blockIdx.x - 1]; if (start + BLOCK_SIZE + t < len) input[start + BLOCK_SIZE + t] += aux[blockIdx.x - 1]; } } __global__ void scan(float * input, float * output, float *aux, int len) { // Load a segment of the input vector into shared memory __shared__ float scan_array[BLOCK_SIZE << 1]; unsigned int t = threadIdx.x; unsigned int start = 2 * blockIdx.x * BLOCK_SIZE; if (start + t < len) scan_array[t] = input[start + t]; else scan_array[t] = 0; if (start + BLOCK_SIZE + t < len) scan_array[BLOCK_SIZE + t] = input[start + BLOCK_SIZE + t]; else scan_array[BLOCK_SIZE + t] = 0; __syncthreads(); // Reduction int stride; for (stride = 1; stride <= BLOCK_SIZE; stride <<= 1) { int index = (t + 1) * stride * 2 - 1; if (index < 2 * BLOCK_SIZE) scan_array[index] += scan_array[index - stride]; __syncthreads(); } // Post reduction for (stride = BLOCK_SIZE >> 1; stride; stride >>= 1) { int index = (t + 1) * stride * 2 - 1; if (index + stride < 2 * BLOCK_SIZE) scan_array[index + stride] += scan_array[index]; __syncthreads(); } if (start + t < len) output[start + t] = scan_array[t]; 
if (start + BLOCK_SIZE + t < len) output[start + BLOCK_SIZE + t] = scan_array[BLOCK_SIZE + t]; if (aux && t == 0) aux[blockIdx.x] = scan_array[2 * BLOCK_SIZE - 1]; } void Scan(float * hostInput, float * hostOutput, int numElements) { float * deviceInput; float * deviceOutput; float *deviceEdgeValsArray; float *deviceEdgeValsScanArray; hipHostMalloc(&hostOutput, numElements * sizeof(float), hipHostMallocDefault); hipMalloc((void**)&deviceInput, numElements*sizeof(float)); hipMalloc((void**)&deviceOutput, numElements*sizeof(float)); hipMalloc((void**)&deviceEdgeValsArray, (BLOCK_SIZE << 1) * sizeof(float)); hipMalloc((void**)&deviceEdgeValsScanArray, (BLOCK_SIZE << 1) * sizeof(float)); hipMemset(deviceOutput, 0, numElements*sizeof(float)); hipMemcpy(deviceInput, hostInput, numElements*sizeof(float), hipMemcpyHostToDevice); int numBlocks = ceil((float)numElements/(BLOCK_SIZE<<1)); dim3 dimGrid(numBlocks, 1, 1); dim3 dimBlock(BLOCK_SIZE, 1, 1); //@@ Modify this to complete the functionality of the scan //@@ on the deivce hipLaunchKernelGGL(( scan), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceInput, deviceOutput, deviceEdgeValsArray, numElements); hipDeviceSynchronize(); hipLaunchKernelGGL(( scan), dim3(dim3(1,1,1)), dim3(dimBlock), 0, 0, deviceEdgeValsArray, deviceEdgeValsScanArray, NULL, BLOCK_SIZE << 1); hipDeviceSynchronize(); hipLaunchKernelGGL(( applyEdgeContibutions), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceOutput, deviceEdgeValsScanArray, numElements); hipDeviceSynchronize(); hipMemcpy(hostOutput, deviceOutput, numElements*sizeof(float), hipMemcpyDeviceToHost); hipFree(deviceInput); hipFree(deviceOutput); hipFree(deviceEdgeValsArray); hipFree(deviceEdgeValsScanArray); } void HostCDF(unsigned int * histo, float * cdf, long numHiostoElems, long numPixels) { cdf[0] = ((float)histo[0])/numPixels; for(int i =1; i< numHiostoElems; i++) { cdf[i] = cdf[i-1] + ((float)histo[i])/numPixels; } } float HostGetMin(float * input, long numElems) { unsigned int minVal = input[0]; for(long i = 0; i < numElems; i++) { if(input[i] < minVal) minVal = input[i]; } return minVal; } unsigned int Clamp(unsigned int x, unsigned int start, unsigned int end) { unsigned int temp =start; if (x > start) temp = x; if(temp > end) temp = end; return temp; } unsigned int CorrectedColorLevel(unsigned int value, float * cdf, float cdfMin) { return Clamp(255*(cdf[value] - cdfMin)/(1-cdfMin), 0, 255); } void HostUcharCorrect2Float(unsigned char *input, float * output, unsigned int * correction, int size) { for(long i = 0; i < size; i++) { unsigned char a = correction[(unsigned int)input[i]]; output[i] = ((float) a)/255.0; } } int main(int argc, char ** argv) { //wbArg_t args; int imageWidth; int imageHeight; int imageChannels; wbImage_t inputImage; wbImage_t outputImage; float * hostInputImageData; float * hostOutputImageData; const char * inputImageFile; #if defined(IS_WEBCUDA) //@@ Insert more code here wbArg_t args; args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); #else inputImageFile = "input.ppm"; #endif inputImage = wbImport(inputImageFile); imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); hostInputImageData = wbImage_getData(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); int dataSize = imageHeight * imageWidth * imageChannels; unsigned pixels = imageHeight * imageWidth; printf("Input %s width %d height %d channels %d dataSize %d pixels 
%u\n",inputImageFile, imageWidth, imageHeight, imageChannels, dataSize, pixels); hipError_t err = hipSuccess; unsigned char * ucharArray = (unsigned char *)malloc(dataSize * sizeof(unsigned char)); unsigned char * grayImage = (unsigned char *)malloc(pixels * sizeof(unsigned char)); if(grayImage == NULL) { printf("Cannot malloc grayImage: %ld\n", pixels * sizeof(unsigned char)); return -1; } unsigned int * histoBins = (unsigned int *)malloc(HISTOGRAM_LENGTH * sizeof(unsigned int)); float *d_FA = NULL; unsigned char *d_UA = NULL; unsigned char *d_GI = NULL; unsigned int * d_HD = NULL; wbCheck(hipMalloc((void **)&d_FA, dataSize * sizeof(float))); wbCheck(hipMalloc((void **)&d_UA, dataSize * sizeof(unsigned char))); wbCheck(hipMalloc((void **)&d_GI, pixels * sizeof(unsigned char))); wbCheck(hipMalloc((void **)&d_HD, HISTOGRAM_LENGTH * sizeof(unsigned int))); wbCheck(hipMemcpy(d_FA, hostInputImageData, dataSize*sizeof(float), hipMemcpyHostToDevice)); // Launch the CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid =(dataSize + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA float2uchar launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( float2uchar), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_FA, d_UA, dataSize); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed convert float to uchar (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } wbCheck(hipMemcpy(ucharArray, d_UA, dataSize * sizeof(unsigned char), hipMemcpyDeviceToHost)); // Diagnostics #if defined(IS_DEBUG) for(int i =0; i < 10; i++) { printf("%f : %u : %u\n", hostInputImageData[i], ucharArray[i], (unsigned char) (255 * hostInputImageData[i])); } for(int i =0; i < dataSize; i++) { if((unsigned char) (255 * hostInputImageData[i]) != ucharArray[i]) { printf("Error pos %d : %f : %u : %u\n", i, hostInputImageData[i], ucharArray[i], (unsigned char) (255 * hostInputImageData[i])); } } #endif // Launch the CUDA Kernel - gray scale threadsPerBlock = 256; blocksPerGrid =(pixels + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); err = hipMemcpy(grayImage, d_GI, pixels * sizeof(unsigned char), hipMemcpyDeviceToHost); if (err != hipSuccess) { printf("Failed first hipMemcpy(grayImage.. (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipLaunchKernelGGL(( rgb2gray), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_UA, d_GI, dataSize); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed greyImage (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(grayImage, d_GI, pixels * sizeof(unsigned char), hipMemcpyDeviceToHost); if (err != hipSuccess) { printf("Failed hipMemcpy(grayImage.. 
(error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Diagnostics #if defined(IS_DEBUG) for(int i =0; i < 24; i++) { printf("%d : %u : %u\n", i, ucharArray[i], grayImage[i]); } #endif // Launch the CUDA Kernel - histo threadsPerBlock = 512; blocksPerGrid =(imageHeight * imageWidth + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel histo launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( histo), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_GI, imageHeight * imageWidth, d_HD); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed histo (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } wbCheck(hipMemcpy(histoBins, d_HD, HISTOGRAM_LENGTH * sizeof(unsigned int), hipMemcpyDeviceToHost)); // Diagnostics #if defined(IS_DEBUG) for(int i =0; i < HISTOGRAM_LENGTH; i++) { printf("%d %u\n", i, histoBins[i]); } unsigned int * testHistoBins = (unsigned int *)malloc(HISTOGRAM_LENGTH * sizeof(unsigned int)); printf("call host calc\n"); HostHistogram(grayImage, imageHeight * imageWidth, testHistoBins); printf("compare kernel and host calc\n"); long sumHost = 0; long sumGpu = 0; for(int i =0; i < HISTOGRAM_LENGTH; i++) { sumHost += testHistoBins[i]; sumGpu += histoBins[i]; if(histoBins[i] != testHistoBins[i]) { printf("Error: index: %d GPUBins: %u TestBins: %u\n", i, histoBins[i], testHistoBins[i]); } } printf("Height:%d Width%d Sum host = %ld SumGpu = %ld\n", imageHeight, imageWidth, sumHost, sumGpu); #endif printf("Get CDF\n"); float * cdf = (float *)malloc(HISTOGRAM_LENGTH * sizeof(float)); HostCDF(histoBins, cdf, HISTOGRAM_LENGTH, imageHeight * imageWidth); float cdfMin = HostGetMin(cdf, HISTOGRAM_LENGTH); #if defined(IS_DEBUG) for(int i =0; i < HISTOGRAM_LENGTH; i++) { printf("cdf: %d:%f\n", i, cdf[i]); } printf("min cdf: %f\n", cdfMin); #endif // should be a kernel to create array of corrections printf("get host correction array\n"); unsigned int * correction = (unsigned int *) malloc(HISTOGRAM_LENGTH * sizeof(unsigned int)); for(int i = 0; i < HISTOGRAM_LENGTH; i++) { correction[i] = CorrectedColorLevel((unsigned int) i, cdf, cdfMin); } #if defined(IS_DEBUG) for(int i =0; i < HISTOGRAM_LENGTH; i++) { printf("correction: %d:%u\n", i, correction[i]); } #endif printf("Attempt correction\n"); hostOutputImageData = wbImage_getData(outputImage); HostUcharCorrect2Float(ucharArray, hostOutputImageData, correction, dataSize); //void ucharCorrect2Float(unsigned char *input, hostOutputImageData, unsigned int * correction, int size) // Diagnostics #if defined(IS_DEBUG) printf("Compare with expected output image\n"); wbImage_t testImage; float * hostTestImageData; const char * testImageFile = "output.ppm"; testImage = wbImport(testImageFile); hostTestImageData = wbImage_getData(testImage); hostInputImageData = wbImage_getData(inputImage); long errorCount =0; for(long i =0; i < dataSize; i++) { if(hostTestImageData[i] != hostOutputImageData[i]) { errorCount++; if(errorCount < 30) { printf("i: %ld input:%f uchar:%u expected:%f actual:%f\n", i, hostInputImageData[i], ucharArray[i], hostTestImageData[i],hostOutputImageData[i]); } } } if(errorCount >0) { printf("output and test image do not match %ld \n", errorCount); } wbPPM_export("blag.ppm", outputImage); #endif #if defined(IS_WEBCUDA) //@@ Insert more code here wbSolution(args, outputImage); #endif err = hipFree(d_FA); err = hipFree(d_UA); err = hipFree(d_GI); err = hipFree(d_HD); free(ucharArray); free(grayImage); free(histoBins); }
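// --- Editor's hedged sketch (not part of the original file) -----------------
// main() above builds the 256-entry correction table on the host and applies it
// on the host via HostUcharCorrect2Float, even though this file already defines
// a device kernel ucharCorrect2Float that is never launched. The helper below is
// a minimal sketch, assuming a device-side correction pass is wanted:
// ApplyCorrectionOnDevice, d_CORR and d_OUT are hypothetical names introduced
// here; ucharCorrect2Float, wbCheck and HISTOGRAM_LENGTH come from the file
// above. It could be called in place of the HostUcharCorrect2Float line,
// passing d_UA, correction, hostOutputImageData and dataSize.
static int ApplyCorrectionOnDevice(unsigned char *d_UA, unsigned int *hostCorrection,
                                   float *hostOutputImageData, int dataSize)
{
    unsigned int *d_CORR = NULL; // device copy of the correction lookup table
    float *d_OUT = NULL;         // device buffer for the corrected float image
    wbCheck(hipMalloc((void **)&d_CORR, HISTOGRAM_LENGTH * sizeof(unsigned int)));
    wbCheck(hipMalloc((void **)&d_OUT, dataSize * sizeof(float)));
    wbCheck(hipMemcpy(d_CORR, hostCorrection, HISTOGRAM_LENGTH * sizeof(unsigned int),
                      hipMemcpyHostToDevice));
    int threadsPerBlock = 256;
    int blocksPerGrid = (dataSize + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL(ucharCorrect2Float, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
                       d_UA, d_OUT, d_CORR, dataSize);
    wbCheck(hipMemcpy(hostOutputImageData, d_OUT, dataSize * sizeof(float),
                      hipMemcpyDeviceToHost));
    hipFree(d_CORR);
    hipFree(d_OUT);
    return 0;
}
// -----------------------------------------------------------------------------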
131ddfb524513929d8b6cdc8f6135cce44e9a09d.cu
#include <iostream> #include <wb.h> #include <cuda_runtime.h> using namespace std; #define IS_WEBCUDA #define HISTOGRAM_LENGTH 256 #define BLOCK_SIZE 512 //@@ You can change this #define wbCheck(stmt) do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ printf("Error:Failed to run stmt %s\n", #stmt); \ return -1; \ } \ } while(0) __global__ void float2uchar(float *input, unsigned char * output, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < size) { output[i] = (unsigned char) (255 * input[i]); } } __global__ void uchar2Float(unsigned char *input, float * output, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < size) { output[i] = ((float) input[i])/255; } } __global__ void ucharCorrect2Float(unsigned char *input, float * output, unsigned int * correction, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < size) { unsigned char a = correction[input[i]]; output[i] = ((float) a)/255; } } __global__ void rgb2gray(unsigned char * ucharImage, unsigned char * grayImage, uint size) { uint i = blockDim.x * blockIdx.x + threadIdx.x; uint rgbIdx = i*3; if(i < size) { uint r = ucharImage[rgbIdx]; uint g = ucharImage[rgbIdx+1]; uint b = ucharImage[rgbIdx+2]; grayImage[i]= (unsigned char) (0.21*r + 0.71*g + 0.07*b); } } void HostHistogram(unsigned char *buffer, long size, unsigned int *histo) { for(long i =0; i<HISTOGRAM_LENGTH; i++) { histo[i]=0; } for(long i =0; i<size; i++) { unsigned int bufferIndex = (unsigned int ) buffer[i]; histo[bufferIndex] += 1; } } __global__ void histo(unsigned char *buffer, long size, unsigned int *histo) { __shared__ unsigned int histo_private[HISTOGRAM_LENGTH]; if(threadIdx.x < HISTOGRAM_LENGTH) histo_private[threadIdx.x] = 0; __syncthreads(); int i = threadIdx.x + blockIdx.x * blockDim.x; // stride is total number of threads int stride = blockDim.x *gridDim.x; while ( i < size) { unsigned int bIdx = (unsigned int)buffer[i]; atomicAdd(&(histo_private[bIdx]), 1); //atomicAdd( &(histo[bIdx]), 1); i += stride; } // wait for all other threads in the block to finish __syncthreads(); if(threadIdx.x < HISTOGRAM_LENGTH) { atomicAdd( &(histo[threadIdx.x]), histo_private[threadIdx.x]); } } __global__ void applyEdgeContibutions(float *input, float *aux, int len) { unsigned int t = threadIdx.x; unsigned int start = 2 * blockIdx.x * BLOCK_SIZE; if (blockIdx.x) { if (start + t < len) input[start + t] += aux[blockIdx.x - 1]; if (start + BLOCK_SIZE + t < len) input[start + BLOCK_SIZE + t] += aux[blockIdx.x - 1]; } } __global__ void scan(float * input, float * output, float *aux, int len) { // Load a segment of the input vector into shared memory __shared__ float scan_array[BLOCK_SIZE << 1]; unsigned int t = threadIdx.x; unsigned int start = 2 * blockIdx.x * BLOCK_SIZE; if (start + t < len) scan_array[t] = input[start + t]; else scan_array[t] = 0; if (start + BLOCK_SIZE + t < len) scan_array[BLOCK_SIZE + t] = input[start + BLOCK_SIZE + t]; else scan_array[BLOCK_SIZE + t] = 0; __syncthreads(); // Reduction int stride; for (stride = 1; stride <= BLOCK_SIZE; stride <<= 1) { int index = (t + 1) * stride * 2 - 1; if (index < 2 * BLOCK_SIZE) scan_array[index] += scan_array[index - stride]; __syncthreads(); } // Post reduction for (stride = BLOCK_SIZE >> 1; stride; stride >>= 1) { int index = (t + 1) * stride * 2 - 1; if (index + stride < 2 * BLOCK_SIZE) scan_array[index + stride] += scan_array[index]; __syncthreads(); } if (start + t < len) output[start + t] = scan_array[t]; if (start + BLOCK_SIZE + t < len) output[start + BLOCK_SIZE + 
t] = scan_array[BLOCK_SIZE + t]; if (aux && t == 0) aux[blockIdx.x] = scan_array[2 * BLOCK_SIZE - 1]; } void Scan(float * hostInput, float * hostOutput, int numElements) { float * deviceInput; float * deviceOutput; float *deviceEdgeValsArray; float *deviceEdgeValsScanArray; cudaHostAlloc(&hostOutput, numElements * sizeof(float), cudaHostAllocDefault); cudaMalloc((void**)&deviceInput, numElements*sizeof(float)); cudaMalloc((void**)&deviceOutput, numElements*sizeof(float)); cudaMalloc((void**)&deviceEdgeValsArray, (BLOCK_SIZE << 1) * sizeof(float)); cudaMalloc((void**)&deviceEdgeValsScanArray, (BLOCK_SIZE << 1) * sizeof(float)); cudaMemset(deviceOutput, 0, numElements*sizeof(float)); cudaMemcpy(deviceInput, hostInput, numElements*sizeof(float), cudaMemcpyHostToDevice); int numBlocks = ceil((float)numElements/(BLOCK_SIZE<<1)); dim3 dimGrid(numBlocks, 1, 1); dim3 dimBlock(BLOCK_SIZE, 1, 1); //@@ Modify this to complete the functionality of the scan //@@ on the deivce scan<<<dimGrid, dimBlock>>>(deviceInput, deviceOutput, deviceEdgeValsArray, numElements); cudaDeviceSynchronize(); scan<<<dim3(1,1,1), dimBlock>>>(deviceEdgeValsArray, deviceEdgeValsScanArray, NULL, BLOCK_SIZE << 1); cudaDeviceSynchronize(); applyEdgeContibutions<<<dimGrid, dimBlock>>>(deviceOutput, deviceEdgeValsScanArray, numElements); cudaDeviceSynchronize(); cudaMemcpy(hostOutput, deviceOutput, numElements*sizeof(float), cudaMemcpyDeviceToHost); cudaFree(deviceInput); cudaFree(deviceOutput); cudaFree(deviceEdgeValsArray); cudaFree(deviceEdgeValsScanArray); } void HostCDF(unsigned int * histo, float * cdf, long numHiostoElems, long numPixels) { cdf[0] = ((float)histo[0])/numPixels; for(int i =1; i< numHiostoElems; i++) { cdf[i] = cdf[i-1] + ((float)histo[i])/numPixels; } } float HostGetMin(float * input, long numElems) { unsigned int minVal = input[0]; for(long i = 0; i < numElems; i++) { if(input[i] < minVal) minVal = input[i]; } return minVal; } unsigned int Clamp(unsigned int x, unsigned int start, unsigned int end) { unsigned int temp =start; if (x > start) temp = x; if(temp > end) temp = end; return temp; } unsigned int CorrectedColorLevel(unsigned int value, float * cdf, float cdfMin) { return Clamp(255*(cdf[value] - cdfMin)/(1-cdfMin), 0, 255); } void HostUcharCorrect2Float(unsigned char *input, float * output, unsigned int * correction, int size) { for(long i = 0; i < size; i++) { unsigned char a = correction[(unsigned int)input[i]]; output[i] = ((float) a)/255.0; } } int main(int argc, char ** argv) { //wbArg_t args; int imageWidth; int imageHeight; int imageChannels; wbImage_t inputImage; wbImage_t outputImage; float * hostInputImageData; float * hostOutputImageData; const char * inputImageFile; #if defined(IS_WEBCUDA) //@@ Insert more code here wbArg_t args; args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); #else inputImageFile = "input.ppm"; #endif inputImage = wbImport(inputImageFile); imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); hostInputImageData = wbImage_getData(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); int dataSize = imageHeight * imageWidth * imageChannels; unsigned pixels = imageHeight * imageWidth; printf("Input %s width %d height %d channels %d dataSize %d pixels %u\n",inputImageFile, imageWidth, imageHeight, imageChannels, dataSize, pixels); cudaError_t err = cudaSuccess; unsigned char * ucharArray = (unsigned char 
*)malloc(dataSize * sizeof(unsigned char)); unsigned char * grayImage = (unsigned char *)malloc(pixels * sizeof(unsigned char)); if(grayImage == NULL) { printf("Cannot malloc grayImage: %ld\n", pixels * sizeof(unsigned char)); return -1; } unsigned int * histoBins = (unsigned int *)malloc(HISTOGRAM_LENGTH * sizeof(unsigned int)); float *d_FA = NULL; unsigned char *d_UA = NULL; unsigned char *d_GI = NULL; unsigned int * d_HD = NULL; wbCheck(cudaMalloc((void **)&d_FA, dataSize * sizeof(float))); wbCheck(cudaMalloc((void **)&d_UA, dataSize * sizeof(unsigned char))); wbCheck(cudaMalloc((void **)&d_GI, pixels * sizeof(unsigned char))); wbCheck(cudaMalloc((void **)&d_HD, HISTOGRAM_LENGTH * sizeof(unsigned int))); wbCheck(cudaMemcpy(d_FA, hostInputImageData, dataSize*sizeof(float), cudaMemcpyHostToDevice)); // Launch the CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid =(dataSize + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA float2uchar launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); float2uchar<<<blocksPerGrid, threadsPerBlock>>>(d_FA, d_UA, dataSize); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed convert float to uchar (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } wbCheck(cudaMemcpy(ucharArray, d_UA, dataSize * sizeof(unsigned char), cudaMemcpyDeviceToHost)); // Diagnostics #if defined(IS_DEBUG) for(int i =0; i < 10; i++) { printf("%f : %u : %u\n", hostInputImageData[i], ucharArray[i], (unsigned char) (255 * hostInputImageData[i])); } for(int i =0; i < dataSize; i++) { if((unsigned char) (255 * hostInputImageData[i]) != ucharArray[i]) { printf("Error pos %d : %f : %u : %u\n", i, hostInputImageData[i], ucharArray[i], (unsigned char) (255 * hostInputImageData[i])); } } #endif // Launch the CUDA Kernel - gray scale threadsPerBlock = 256; blocksPerGrid =(pixels + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); err = cudaMemcpy(grayImage, d_GI, pixels * sizeof(unsigned char), cudaMemcpyDeviceToHost); if (err != cudaSuccess) { printf("Failed first cudaMemcpy(grayImage.. (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } rgb2gray<<<blocksPerGrid, threadsPerBlock>>>(d_UA, d_GI, dataSize); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed greyImage (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(grayImage, d_GI, pixels * sizeof(unsigned char), cudaMemcpyDeviceToHost); if (err != cudaSuccess) { printf("Failed cudaMemcpy(grayImage.. 
(error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Diagnostics #if defined(IS_DEBUG) for(int i =0; i < 24; i++) { printf("%d : %u : %u\n", i, ucharArray[i], grayImage[i]); } #endif // Launch the CUDA Kernel - histo threadsPerBlock = 512; blocksPerGrid =(imageHeight * imageWidth + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel histo launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); histo<<<blocksPerGrid, threadsPerBlock>>>(d_GI, imageHeight * imageWidth, d_HD); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed histo (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } wbCheck(cudaMemcpy(histoBins, d_HD, HISTOGRAM_LENGTH * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // Diagnostics #if defined(IS_DEBUG) for(int i =0; i < HISTOGRAM_LENGTH; i++) { printf("%d %u\n", i, histoBins[i]); } unsigned int * testHistoBins = (unsigned int *)malloc(HISTOGRAM_LENGTH * sizeof(unsigned int)); printf("call host calc\n"); HostHistogram(grayImage, imageHeight * imageWidth, testHistoBins); printf("compare kernel and host calc\n"); long sumHost = 0; long sumGpu = 0; for(int i =0; i < HISTOGRAM_LENGTH; i++) { sumHost += testHistoBins[i]; sumGpu += histoBins[i]; if(histoBins[i] != testHistoBins[i]) { printf("Error: index: %d GPUBins: %u TestBins: %u\n", i, histoBins[i], testHistoBins[i]); } } printf("Height:%d Width%d Sum host = %ld SumGpu = %ld\n", imageHeight, imageWidth, sumHost, sumGpu); #endif printf("Get CDF\n"); float * cdf = (float *)malloc(HISTOGRAM_LENGTH * sizeof(float)); HostCDF(histoBins, cdf, HISTOGRAM_LENGTH, imageHeight * imageWidth); float cdfMin = HostGetMin(cdf, HISTOGRAM_LENGTH); #if defined(IS_DEBUG) for(int i =0; i < HISTOGRAM_LENGTH; i++) { printf("cdf: %d:%f\n", i, cdf[i]); } printf("min cdf: %f\n", cdfMin); #endif // should be a kernel to create array of corrections printf("get host correction array\n"); unsigned int * correction = (unsigned int *) malloc(HISTOGRAM_LENGTH * sizeof(unsigned int)); for(int i = 0; i < HISTOGRAM_LENGTH; i++) { correction[i] = CorrectedColorLevel((unsigned int) i, cdf, cdfMin); } #if defined(IS_DEBUG) for(int i =0; i < HISTOGRAM_LENGTH; i++) { printf("correction: %d:%u\n", i, correction[i]); } #endif printf("Attempt correction\n"); hostOutputImageData = wbImage_getData(outputImage); HostUcharCorrect2Float(ucharArray, hostOutputImageData, correction, dataSize); //void ucharCorrect2Float(unsigned char *input, hostOutputImageData, unsigned int * correction, int size) // Diagnostics #if defined(IS_DEBUG) printf("Compare with expected output image\n"); wbImage_t testImage; float * hostTestImageData; const char * testImageFile = "output.ppm"; testImage = wbImport(testImageFile); hostTestImageData = wbImage_getData(testImage); hostInputImageData = wbImage_getData(inputImage); long errorCount =0; for(long i =0; i < dataSize; i++) { if(hostTestImageData[i] != hostOutputImageData[i]) { errorCount++; if(errorCount < 30) { printf("i: %ld input:%f uchar:%u expected:%f actual:%f\n", i, hostInputImageData[i], ucharArray[i], hostTestImageData[i],hostOutputImageData[i]); } } } if(errorCount >0) { printf("output and test image do not match %ld \n", errorCount); } wbPPM_export("blag.ppm", outputImage); #endif #if defined(IS_WEBCUDA) //@@ Insert more code here wbSolution(args, outputImage); #endif err = cudaFree(d_FA); err = cudaFree(d_UA); err = cudaFree(d_GI); err = cudaFree(d_HD); free(ucharArray); free(grayImage); free(histoBins); }
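// --- Editor's hedged sketch (not part of the original file) -----------------
// In main() above, d_HD is allocated with cudaMalloc and then accumulated into
// with atomicAdd inside the histo kernel, but it is never cleared first, and
// cudaMalloc does not zero-initialize memory. A minimal sketch, assuming the
// histogram bins are meant to start at zero, is to clear the buffer right
// before the histo<<<blocksPerGrid, threadsPerBlock>>> launch:
//
//     wbCheck(cudaMemset(d_HD, 0, HISTOGRAM_LENGTH * sizeof(unsigned int)));
//
// The same consideration would apply to the hipified version of this file,
// where the corresponding call is hipMemset.
// -----------------------------------------------------------------------------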
e42110cc33af22a925f8a4b579d530d232345f90.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include"fd_cuda_lib.c" extern "C"{ __global__ void kernel_v(double *dev_vx, int BD_nx_vx, int BD_nz_vx, double *dev_vy, int BD_nx_vy, int BD_nz_vy, double *dev_vz, int BD_nx_vz, int BD_nz_vz, double *dev_tpp, int BD_nx_tpp, int BD_nz_tpp, double *dev_rho, double *dev_fdc, double dx, double dy, double dz, double dt, int chunk_full, int chunk_half) { int tX = threadIdx.x + blockIdx.x*blockDim.x; int tY = threadIdx.y + blockIdx.y*blockDim.y; int tZ = threadIdx.z + blockIdx.z*blockDim.z; int tid_vx, tid_vy, tid_vz; int tid_tpp_0, tid_tpp_1, tid_tpp_2, tid_tpp_3; int tid_rho_0, tid_rho_1; double tmp, tmp_rho; //***************** vx ********************// tid_vx = tZ + tX*BD_nz_vx + tY*BD_nx_vx*BD_nz_vx; tid_rho_0 = tZ + (tX+0)*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_rho_1 = tZ + (tX+1)*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; //***************** vx by txx ********************// if(tX < BD_nx_vx-1 && tX > 0 && tY < chunk_full && tZ < BD_nz_vx) { tmp_rho = (0.5*(*(dev_rho + tid_rho_0) + *(dev_rho + tid_rho_1)))/dt; tid_tpp_0 = tZ + (tX-1)*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_tpp_1 = tZ + (tX+0)*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_tpp_2 = tZ + (tX+1)*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_tpp_3 = tZ + (tX+2)*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; *(dev_vx+tid_vx) = *(dev_vx+tid_vx) + ((*(dev_txx+tid_tpp_0)* *(dev_fdc) + *(dev_txx+tid_tpp_1)* *(dev_fdc+1) + *(dev_txx+tid_tpp_2)* *(dev_fdc+2) + *(dev_txx+tid_tpp_3)* *(dev_fdc+3)) / dx)/ tmp_rho; } //***************** vy ********************// tid_vy = tZ + tX*BD_nz_vy + tY*BD_nx_vy*BD_nz_vy; tid_rho_0 = tZ + tX*BD_nz_tpp + (tY+0)*BD_nx_tpp*BD_nz_tpp; tid_rho_1 = tZ + tX*BD_nz_tpp + (tY+1)*BD_nx_tpp*BD_nz_tpp; //***************** vy by tyy********************// if(tX < BD_nx_vy && tY < chunk_half && tZ < BD_nz_vy) { tmp_rho = (0.5*(*(dev_rho + tid_rho_0) + *(dev_rho + tid_rho_1)))/dt; tid_tpp_0 = tZ + tX*BD_nz_tpp + (tY+0)*BD_nx_tpp*BD_nz_tpp; tid_tpp_1 = tZ + tX*BD_nz_tpp + (tY+1)*BD_nx_tpp*BD_nz_tpp; tid_tpp_2 = tZ + tX*BD_nz_tpp + (tY+2)*BD_nx_tpp*BD_nz_tpp; tid_tpp_3 = tZ + tX*BD_nz_tpp + (tY+3)*BD_nx_tpp*BD_nz_tpp; *(dev_vy+tid_vy) = *(dev_vy+tid_vy) + ((*(dev_tyy+tid_tpp_0)* *(dev_fdc) + *(dev_tyy+tid_tpp_1)* *(dev_fdc+1) + *(dev_tyy+tid_tpp_2)* *(dev_fdc+2) + *(dev_tyy+tid_tpp_3)* *(dev_fdc+3)) / dy) / tmp_rho; } //***************** vz ********************// tid_vz = tZ + tX*BD_nz_vz + tY*BD_nx_vz*BD_nz_vz; tid_rho_0 = (tZ+0) + tX*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_rho_1 = (tZ+1) + tX*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; //***************** vz by tzz ********************// if(tX < BD_nx_vz && tY < chunk_full && tZ < BD_nz_vz-1 && tZ > 0) { tmp_rho = (0.5*(*(dev_rho + tid_rho_0) + *(dev_rho + tid_rho_1)))/dt; tid_tpp_0 = (tZ-1) + tX*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_tpp_1 = (tZ+0) + tX*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_tpp_2 = (tZ+1) + tX*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_tpp_3 = (tZ+2) + tX*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tmp = ((*(dev_tzz+tid_tpp_0)* *(dev_fdc) + *(dev_tzz+tid_tpp_1)* *(dev_fdc+1) + *(dev_tzz+tid_tpp_2)* *(dev_fdc+2) + *(dev_tzz+tid_tpp_3)* *(dev_fdc+3)) / dz) / tmp_rho; *(dev_vz+tid_vz) = *(dev_vz+tid_vz) + tmp; } } __global__ void kernel_tau(double *dev_vx, int BD_nx_vx, int BD_nz_vx, double *dev_vy, int BD_nx_vy, int BD_nz_vy, double *dev_vz, int BD_nx_vz, int BD_nz_vz, double *dev_tpp, int BD_nx_tpp, int BD_nz_tpp, double *dev_lambda, double *dev_fdc, double dx, double dy, double dz, double 
dt, int chunk_full, int chunk_half) { int tX = threadIdx.x + blockIdx.x*blockDim.x; int tY = threadIdx.y + blockIdx.y*blockDim.y; int tZ = threadIdx.z + blockIdx.z*blockDim.z; int tid_tpp, tid_lambda_tpp; int tid_vx_0, tid_vx_1, tid_vx_2, tid_vx_3; int tid_vy_0, tid_vy_1, tid_vy_2, tid_vy_3; int tid_vz_0, tid_vz_1, tid_vz_2, tid_vz_3; double tmp_vx, tmp_vy, tmp_vz; //**************** tau_pp ********************// tid_tpp = tZ + tX*BD_nz_tpp + tY*BD_nz_tpp*BD_nx_tpp; tid_lambda_tpp = tZ + tX*BD_nz_tpp + tY*BD_nz_tpp*BD_nx_tpp; //**************** tau_pp by vx ********************// if(tX > 1 && tX < BD_nx_tpp-2 && tY < chunk_full && tZ < BD_nz_tpp) { tid_vx_0 = tZ + (tX-2)*BD_nz_vx + (tY+2)*BD_nz_vx*BD_nx_vx; tid_vx_1 = tZ + (tX-1)*BD_nz_vx + (tY+2)*BD_nz_vx*BD_nx_vx; tid_vx_2 = tZ + (tX+0)*BD_nz_vx + (tY+2)*BD_nz_vx*BD_nx_vx; tid_vx_3 = tZ + (tX+1)*BD_nz_vx + (tY+2)*BD_nz_vx*BD_nx_vx; tmp_vx = ((*(dev_vx+tid_vx_0)* *(dev_fdc) + *(dev_vx+tid_vx_1)* *(dev_fdc+1) + *(dev_vx+tid_vx_2)* *(dev_fdc+2) + *(dev_vx+tid_vx_3)* *(dev_fdc+3)) / dx)*dt; *(dev_tpp+tid_tpp) = *(dev_tpp+tid_tpp) + *(dev_lambda+tid_lambda_tpp) * tmp_vx; } //**************** tau_pp by vy ********************// if(tX < BD_nx_tpp && tY < chunk_full && tZ < BD_nz_tpp) { tid_vy_0 = tZ + tX*BD_nz_vy + (tY+0)*BD_nz_vy*BD_nx_vy; tid_vy_1 = tZ + tX*BD_nz_vy + (tY+1)*BD_nz_vy*BD_nx_vy; tid_vy_2 = tZ + tX*BD_nz_vy + (tY+2)*BD_nz_vy*BD_nx_vy; tid_vy_3 = tZ + tX*BD_nz_vy + (tY+3)*BD_nz_vy*BD_nx_vy; tmp_vy = ((*(dev_vy+tid_vy_0)* *(dev_fdc) + *(dev_vy+tid_vy_1)* *(dev_fdc+1) + *(dev_vy+tid_vy_2)* *(dev_fdc+2) + *(dev_vy+tid_vy_3)* *(dev_fdc+3)) / dy)*dt; *(dev_tpp+tid_tpp) = *(dev_tpp+tid_tpp) + *(dev_lambda+tid_lambda_tpp) * tmp_vy; } //**************** tau_pp by vz ********************// if(tX < BD_nx_tpp && tY < chunk_full && tZ > 1 && tZ < BD_nz_tpp-2) { tid_vz_0 = (tZ-2) + tX*BD_nz_vz + (tY+2)*BD_nz_vz*BD_nx_vz; tid_vz_1 = (tZ-1) + tX*BD_nz_vz + (tY+2)*BD_nz_vz*BD_nx_vz; tid_vz_2 = (tZ+0) + tX*BD_nz_vz + (tY+2)*BD_nz_vz*BD_nx_vz; tid_vz_3 = (tZ+1) + tX*BD_nz_vz + (tY+2)*BD_nz_vz*BD_nx_vz; tmp_vz = ((*(dev_vz+tid_vz_0)* *(dev_fdc) + *(dev_vz+tid_vz_1)* *(dev_fdc+1) + *(dev_vz+tid_vz_2)* *(dev_fdc+2) + *(dev_vz+tid_vz_3)* *(dev_fdc+3)) / dz)*dt; *(dev_tpp+tid_tpp) = *(dev_tpp+tid_tpp) + *(dev_lambda+tid_lambda_tpp) * tmp_vz; } } //double *fdc/ //double *intvl: dz, dx, dt //double *modelsize : BDnDZ, BDnHX, nDZ, nHX void ac3d_cump(double *vx, int BD_nx_vx, int BD_ny_vx, int BD_nz_vx, double *pvxbtpp, double *vy, int BD_nx_vy, int BD_ny_vy, int BD_nz_vy, double *pvybtpp, double *vz, int BD_nx_vz, int BD_ny_vz, int BD_nz_vz, double *pvzbtpp, double *tpp, double *ptppbvx, double *ptppbvy, double *ptppbvz, int BD_nx_tpp, int BD_ny_tpp, int BD_nz_tpp, double *rho, double *lambda, double *fdc, double dt, double dx, double dy, double dz, int ext, double *bhalf, double *ahalf, double *bfull, double *afull, long int *chk_group_full, long int *chk_group_half, long int Num_group, long int Max_group_dim, long int *chk_stream_full, long int *chk_stream_half, long int Max_num_stream, long int Max_stream_dim, long int *threadim, long int *blockdim) { int i,j,k,t; //************************************************// //**************** GPU setting *******************// //************************************************// int gn,sn; // gn : group number; sn : stream number hipStream_t stream[Max_num_stream]; // create streams for GPU double *dev_fdc; hipMalloc((void**)&dev_fdc, 4*sizeof(double)); // copy fdc to device 
hipMemcpy(dev_fdc,fdc,4*sizeof(double),hipMemcpyHostToDevice); for(sn=0 ; sn < Max_num_stream ; sn++){ hipStreamCreate(&stream[sn]); // create concurrent streams } // number of threads and blocks used dim3 threads(*threadim,*(threadim+1),*(threadim+2)); dim3 blocks(*blockdim,*(blockdim+1),*(blockdim+2)); // pinned host memory for faster transfer between host and device mem double *host_vx, *host_vy, *host_vz; double *host_tpp, double *host_rho, *host_lambda; hipHostMalloc((void**)&host_vx, (Max_group_dim+3)*BD_nx_vx*BD_nz_vx*sizeof(double), hipHostMallocDefault); hipHostMalloc((void**)&host_vy, (Max_group_dim+3)*BD_nx_vy*BD_nz_vy*sizeof(double), hipHostMallocDefault); hipHostMalloc((void**)&host_vz, (Max_group_dim+3)*BD_nx_vz*BD_nz_vz*sizeof(double), hipHostMallocDefault); hipHostMalloc((void**)&host_tpp, Max_group_dim*BD_nx_tpp*BD_nz_tpp*sizeof(double), hipHostMallocDefault); hipHostMalloc((void**)&host_rho, (Max_group_dim+1)*BD_nx_tpp*BD_nz_tpp*sizeof(double), hipHostMallocDefault); hipHostMalloc((void**)&host_lambda, Max_group_dim*BD_nx_tpp*BD_nz_tpp*sizeof(double), hipHostMallocDefault); // device memory for each stream double *dev_vx[Max_num_stream], *dev_vy[Max_num_stream], *dev_vz[Max_num_stream]; double *dev_tpp[Max_num_stream]; double *dev_rho[Max_num_stream], *dev_lambda[Max_num_stream]; for(i=0 ; i<Max_num_stream ; i++){ hipMalloc((void**)&dev_vx[i], (Max_stream_dim+3)*BD_nx_vx*BD_nz_vx*sizeof(double)); hipMalloc((void**)&dev_vy[i], (Max_stream_dim+3)*BD_nx_vy*BD_nz_vy*sizeof(double)); hipMalloc((void**)&dev_vz[i], (Max_stream_dim+3)*BD_nx_vz*BD_nz_vz*sizeof(double)); hipMalloc((void**)&dev_txx[i], Max_stream_dim*BD_nx_tpp*BD_nz_tpp*sizeof(double)); hipMalloc((void**)&dev_rho[i], (Max_stream_dim+1)*BD_nx_tpp*BD_nz_tpp*sizeof(double)); hipMalloc((void**)&dev_lambda[i], Max_stream_dim*BD_nx_tpp*BD_nz_tpp*sizeof(double)); } int start_group_full = 0; int start_group_half = 0; int offset_group_full; int offset_group_half; int start_stream_full[Max_num_stream]; int start_stream_half[Max_num_stream]; int offset_stream_full[Max_num_stream]; int offset_stream_half[Max_num_stream]; for(gn=0 ; gn<Num_group; gn++) { start_stream_full[0] = 0; start_stream_half[0] = 0; for(sn=0;sn<Max_num_stream;sn++) { offset_stream_full[sn] = *(chk_stream_full+gn*(Max_num_stream+2)+sn+2); offset_stream_half[sn] = *(chk_stream_half+gn*(Max_num_stream+2)+sn+2); } for(sn=1;sn<Max_num_stream;sn++) { start_stream_full[sn] = start_stream_full[sn-1] + offset_stream_full[sn-1]; start_stream_half[sn] = start_stream_half[sn-1] + offset_stream_half[sn-1]; } } //************ time iteration ************// for(t=0;t<1;t++) { // *****************************************************************// // ********************** GPU particle velocities ******************// // *****************************************************************// start_group_full = 2; start_group_half = 1; for(gn=0 ; gn<Num_group; gn++) { start_group_full = start_group_full + *(chk_group_full+gn); start_group_half = start_group_half + *(chk_group_half+gn); offset_group_full = *(chk_group_full+gn+1); offset_group_half = *(chk_group_half+gn+1); // vx, vz, txx, tzz, txz memcpy(host_vx, vx+start_group_full*BD_nz_vx*BD_nx_vx, offset_group_full*BD_nz_vx*BD_nx_vx*sizeof(double)); memcpy(host_vy, vy+start_group_half*BD_nz_vy*BD_nx_vy, offset_group_half*BD_nz_vy*BD_nx_vy*sizeof(double)); memcpy(host_vz, vz+start_group_full*BD_nz_vz*BD_nx_vz, offset_group_full*BD_nz_vz*BD_nx_vz*sizeof(double)); memcpy(host_tpp, tpp+start_group_full*BD_nz_tpp*BD_nx_tpp, 
offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double)); memcpy(host_rho, rho+start_group_half*BD_nz_tpp*BD_nx_tpp, (offset_group_half+1)*BD_nz_tpp*BD_nx_tpp*sizeof(double)); // copy host memory data to device memory() for(sn=0 ; sn<*(chk_stream_full+gn*(Max_num_stream+2)); sn++) { hipMemcpyAsync(dev_vx[sn], host_vx+start_stream_full[sn]*BD_nz_vx*BD_nx_vx, offset_stream_full[sn]*BD_nz_vx*BD_nx_vx*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_vz[sn], host_vz+start_stream_full[sn]*BD_nz_vz*BD_nx_vz, offset_stream_full[sn]*BD_nz_vz*BD_nx_vz*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_txx[sn], host_txx+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_tzz[sn], host_tzz+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_txz[sn], host_txz+start_stream_full[sn]*BD_nz_txz*BD_nx_txz, offset_stream_full[sn]*BD_nz_txz*BD_nx_txz*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_txy[sn], host_txy+start_stream_full[sn]*BD_nz_txy*BD_nx_txy, (offset_stream_full[sn]+3)*BD_nz_txy*BD_nx_txy*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_tyz[sn], host_tyz+start_stream_full[sn]*BD_nz_tyz*BD_nx_tyz, (offset_stream_full[sn]+3)*BD_nz_tyz*BD_nx_tyz*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_vy[sn], host_vy+start_stream_half[sn]*BD_nz_vy*BD_nx_vy, offset_stream_half[sn]*BD_nz_vy*BD_nx_vy*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_tyy[sn], host_tyy+start_stream_half[sn]*BD_nz_tpp*BD_nx_tpp, (offset_stream_half[sn]+3)*BD_nz_tpp*BD_nx_tpp*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_rho[sn], host_rho+start_stream_half[sn]*BD_nz_tpp*BD_nx_tpp, (offset_stream_half[sn]+1)*BD_nz_tpp*BD_nx_tpp*sizeof(double), hipMemcpyHostToDevice, stream[sn]); } // if(err!= hipSuccess){printf("%s\n","error");} for(sn=0 ; sn<*(chk_stream_full+gn*(Max_num_stream+2)); sn++) { hipLaunchKernelGGL(( kernel_v), dim3(blocks),dim3(threads),0,stream[sn], dev_vx[sn], BD_nx_vx, BD_nz_vx, dev_vy[sn], BD_nx_vy, BD_nz_vy, dev_vz[sn], BD_nx_vz, BD_nz_vz, dev_txx[sn], dev_tyy[sn], dev_tzz[sn], BD_nx_tpp, BD_nz_tpp, dev_txy[sn], BD_nx_txy, BD_nz_txy, dev_tyz[sn], BD_nx_tyz, BD_nz_tyz, dev_txz[sn], BD_nx_txz, BD_nz_txz, dev_rho[sn], dev_fdc, dx, dy, dz, dt, offset_stream_full[sn], offset_stream_half[sn]); } for(sn=0 ; sn<*(chk_stream_full+gn*(Max_num_stream+2)); sn++) { hipMemcpyAsync(host_vx+start_stream_full[sn]*BD_nz_vx*BD_nx_vx, dev_vx[sn], offset_stream_full[sn]*BD_nz_vx*BD_nx_vx*sizeof(double), hipMemcpyDeviceToHost, stream[sn]); hipMemcpyAsync(host_vz+start_stream_full[sn]*BD_nz_vz*BD_nx_vz, dev_vz[sn], offset_stream_full[sn]*BD_nz_vz*BD_nx_vz*sizeof(double), hipMemcpyDeviceToHost, stream[sn]); hipMemcpyAsync(host_vy+start_stream_half[sn]*BD_nz_vy*BD_nx_vy, dev_vy[sn], offset_stream_half[sn]*BD_nz_vy*BD_nx_vy*sizeof(double), hipMemcpyDeviceToHost, stream[sn]); hipMemcpyAsync(host_txx+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, dev_txx[sn], offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), hipMemcpyDeviceToHost, stream[sn]); } hipDeviceSynchronize(); memcpy(vx+start_group_full*BD_nz_vx*BD_nx_vx, host_vx, offset_group_full*BD_nz_vx*BD_nx_vx*sizeof(double)); memcpy(vy+start_group_half*BD_nz_vy*BD_nx_vy, host_vy, 
offset_group_half*BD_nz_vy*BD_nx_vy*sizeof(double)); memcpy(vz+start_group_full*BD_nz_vz*BD_nx_vz, host_vz, offset_group_full*BD_nz_vz*BD_nx_vz*sizeof(double)); memcpy(txx+start_group_full*BD_nz_tpp*BD_nx_tpp, host_txx, offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double)); } // *************************************************// // ******* openmp particle velocity boundary *******// // *************************************************// // vxbtxx for(k=0; k<BD_ny_vx; k++) { for(i=0; i<BD_nz_vx; i++) { for(j=1; j<ext; j++) { bound_x(vx, BD_nz_vx, BD_nx_vx, BD_ny_vx, i, j, k, txx, BD_nz_tpp, BD_nx_tpp, 1, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+i+(BD_nx_vx-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(BD_nx_vx-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), dx, dt, pvxbtxx, bhalf, ahalf, ext, fdc); } } } // vybtxy for(k=0; k<BD_ny_vy; k++) { for(i=0; i<BD_nz_vy; i++) { for(j=2; j<ext; j++) { bound_x(vy, BD_nz_vy, BD_nx_vy, BD_ny_vy, i, j, k, txy, BD_nz_txy, BD_nx_txy, 2, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+i+(BD_nx_vy-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(BD_nx_vy-1-j)*BD_nz_tpp+(BD_ny_vy-k)*BD_nz_tpp*BD_nx_tpp)), dx, dt, pvybtxy, bfull, afull, ext, fdc); } } } // vzbtxz for(k=0; k<BD_ny_vz; k++) { for(i=0; i<BD_nz_vz; i++) { for(j=2; j<ext; j++) { bound_x(vz, BD_nz_vz, BD_nx_vz, BD_ny_vz, i, j, k, txz, BD_nz_txz, BD_nx_txz, 2, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+i+(BD_nx_vz-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(i+1)+(BD_nx_vz-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), dx,dt, pvzbtxz, bfull, afull, ext, fdc); } } } //********************* V_Y *********************// // vxbtxy for(j=0; j<BD_nx_vx; j++) { for(i=0; i<BD_nz_vx; i++) { for(k=2; k<ext; k++) { bound_y(vx, BD_nz_vx, BD_nx_vx, BD_ny_vx, i, j, k, txy, BD_nz_txy, BD_nx_txy, 2, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+i+j*BD_nz_tpp+(BD_ny_vx-1-k)*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(j+1)*BD_nz_tpp+(BD_ny_vx-1-k)*BD_nz_tpp*BD_nx_tpp)), dy,dt, pvxbtxy, bfull, afull, ext, fdc); } } } // vybtyy for(j=0; j<BD_nx_vy; j++) { for(i=0; i<BD_nz_vy; i++) { for(k=1; k<ext; k++) { bound_y(vy, BD_nz_vy, BD_nx_vy, BD_ny_vy, i, j, k, tyy, BD_nz_tpp, BD_nx_tpp, 1, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+i+j*BD_nz_tpp+(BD_ny_vy-1-k)*BD_nz_tpp*BD_nx_tpp)+*(rho+i+j*BD_nz_tpp+(BD_ny_vy-k)*BD_nz_tpp*BD_nx_tpp)), dy,dt, pvybtyy, bhalf, ahalf, ext, fdc); } } } // vzbtyz for(j=0; j<BD_nx_vz; j++) { for(i=0; i<BD_nz_vz; i++) { for(k=2; k<ext; k++) { bound_y(vz, BD_nz_vz, BD_nx_vz, BD_ny_vz, i, j, k, tyz, BD_nz_tyz, BD_nx_tyz, 2, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+i+j*BD_nz_tpp+(BD_ny_vz-1-k)*BD_nz_tpp*BD_nx_tpp)+*(rho+(i+1)+j*BD_nz_tpp+(BD_ny_vz-1-k)*BD_nz_tpp*BD_nx_tpp)), dy,dt, pvzbtyz, bfull, afull, ext, fdc); } } } //********************* V_Z *********************// // vxbtxz for(j=0; j<BD_nx_vx; j++) { for(k=0; k<BD_ny_vx; k++) { for(i=2; i<ext; i++) { unlimited_bound_z(vx, BD_nz_vx, BD_nx_vx, BD_ny_vx, i, j, k, txz, BD_nz_txz, BD_nx_txz, 2, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), 
2.0/(*(rho+(BD_nz_vx-1-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(BD_nz_vx-1-i)+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), dz,dt, pvxbtxz, bfull, afull, ext, fdc); } } } // vybtyz for(j=0; j<BD_nx_vy; j++) { for(k=0; k<BD_ny_vy; k++) { for(i=2; i<ext; i++) { unlimited_bound_z(vy, BD_nz_vy, BD_nx_vy, BD_ny_vy, i, j, k, tyz, BD_nz_tyz, BD_nx_tyz, 2, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+(BD_nz_vy-1-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(BD_nz_vy-1-i)+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp)), dz,dt, pvybtyz, bfull, afull, ext, fdc); } } } // vzbtzz for(j=0; j<BD_nx_vz; j++) { for(k=0; k<BD_ny_vz; k++) { for(i=1;i<ext;i++) { unlimited_bound_z(vz, BD_nz_vz, BD_nx_vz, BD_ny_vz, i, j, k, tzz, BD_nz_tpp, BD_nx_tpp, 1, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+(BD_nz_vz-1-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(BD_nz_vz-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), dz,dt, pvzbtzz, bhalf, ahalf, ext, fdc); } } } //*****************************************************************// //**************************** GPU stress *************************// //*****************************************************************// start_group_full = 2; start_group_half = 1; for(gn=0 ; gn<Num_group; gn++) { start_group_full = start_group_full + *(chk_group_full+gn); start_group_half = start_group_half + *(chk_group_half+gn); offset_group_full = *(chk_group_full+gn+1); offset_group_half = *(chk_group_half+gn+1); // vx, vz, txx, tzz, txz memcpy(host_txx, txx+start_group_full*BD_nz_tpp*BD_nx_tpp, offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double)); memcpy(host_tzz, tzz+start_group_full*BD_nz_tpp*BD_nx_tpp, offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double)); memcpy(host_tyy, tyy+start_group_full*BD_nz_tpp*BD_nx_tpp, offset_group_half*BD_nz_tpp*BD_nx_tpp*sizeof(double)); memcpy(host_txy, txy+start_group_half*BD_nz_txy*BD_nx_txy, offset_group_half*BD_nz_txy*BD_nx_txy*sizeof(double)); memcpy(host_tyz, tyz+start_group_half*BD_nz_tyz*BD_nx_tyz, offset_group_half*BD_nz_tyz*BD_nx_tyz*sizeof(double)); memcpy(host_txz, txz+start_group_full*BD_nz_txz*BD_nx_txz, offset_group_full*BD_nz_txz*BD_nx_txz*sizeof(double)); memcpy(host_vx, vx+(start_group_half-1)*BD_nz_vx*BD_nx_vx, (offset_group_half+3)*BD_nz_vx*BD_nx_vx*sizeof(double)); memcpy(host_vy, vy+(start_group_full-2)*BD_nz_vy*BD_nx_vy, (offset_group_full+3)*BD_nz_vy*BD_nx_vy*sizeof(double)); memcpy(host_vz, vz+(start_group_half-1)*BD_nz_vz*BD_nx_vz, (offset_group_half+3)*BD_nz_vz*BD_nx_vz*sizeof(double)); memcpy(host_lambda, lambda+start_group_full*BD_nz_tpp*BD_nx_tpp, offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double)); memcpy(host_mu, mu+start_group_half*BD_nz_tpp*BD_nx_tpp, (offset_group_half+1)*BD_nz_tpp*BD_nx_tpp*sizeof(double)); // copy host memory data to device memory() for(sn=0 ; sn<*(chk_stream_full+gn*(Max_num_stream+2)); sn++) { hipMemcpyAsync(dev_txx[sn], host_txx+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_tzz[sn], host_tzz+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_tyy[sn], host_tyy+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_txz[sn], 
host_txz+start_stream_full[sn]*BD_nz_txz*BD_nx_txz, offset_stream_full[sn]*BD_nz_txz*BD_nx_txz*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_txy[sn], host_txy+start_stream_half[sn]*BD_nz_txy*BD_nx_txy, offset_stream_half[sn]*BD_nz_txy*BD_nx_txy*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_tyz[sn], host_tyz+start_stream_half[sn]*BD_nz_tyz*BD_nx_tyz, offset_stream_half[sn]*BD_nz_tyz*BD_nx_tyz*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_vx[sn], host_vx+start_stream_half[sn]*BD_nz_vx*BD_nx_vx, (offset_stream_half[sn]+3)*BD_nz_vx*BD_nx_vx*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_vz[sn], host_vz+start_stream_half[sn]*BD_nz_vz*BD_nx_vz, (offset_stream_half[sn]+3)*BD_nz_vz*BD_nx_vz*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_vy[sn], host_vy+start_stream_full[sn]*BD_nz_vy*BD_nx_vy, (offset_stream_full[sn]+3)*BD_nz_vy*BD_nx_vy*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_lambda[sn], host_lambda+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), hipMemcpyHostToDevice, stream[sn]); hipMemcpyAsync(dev_mu[sn], host_mu+start_stream_half[sn]*BD_nz_tpp*BD_nx_tpp, (offset_stream_half[sn]+1)*BD_nz_tpp*BD_nx_tpp*sizeof(double), hipMemcpyHostToDevice, stream[sn]); } for(sn=0 ; sn<*(chk_stream_full+gn*(Max_num_stream+2)); sn++) { hipLaunchKernelGGL(( kernel_tau), dim3(blocks),dim3(threads),0,stream[sn], dev_vx[sn], BD_nx_vx, BD_nz_vx, dev_vy[sn], BD_nx_vy, BD_nz_vy, dev_vz[sn], BD_nx_vz, BD_nz_vz, dev_txx[sn], dev_tyy[sn], dev_tzz[sn], BD_nx_tpp, BD_nz_tpp, dev_txy[sn], BD_nx_txy, BD_nz_txy, dev_tyz[sn], BD_nx_tyz, BD_nz_tyz, dev_txz[sn], BD_nx_txz, BD_nz_txz, dev_lambda[sn], dev_mu[sn], dev_fdc, dx, dy, dz, dt, offset_stream_full[sn], offset_stream_half[sn]); } for(sn=0 ; sn<*(chk_stream_full+gn*(Max_num_stream+2)); sn++) { hipMemcpyAsync(host_txx+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, dev_txx[sn], offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), hipMemcpyDeviceToHost, stream[sn]); hipMemcpyAsync(host_tyy+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, dev_tyy[sn], offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), hipMemcpyDeviceToHost, stream[sn]); hipMemcpyAsync(host_tzz+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, dev_tzz[sn], offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), hipMemcpyDeviceToHost, stream[sn]); hipMemcpyAsync(host_txz+start_stream_full[sn]*BD_nz_txz*BD_nx_txz, dev_txz[sn], offset_stream_full[sn]*BD_nz_txz*BD_nx_txz*sizeof(double), hipMemcpyDeviceToHost, stream[sn]); hipMemcpyAsync(host_txy+start_stream_half[sn]*BD_nz_txy*BD_nx_txy, dev_txy[sn], offset_stream_half[sn]*BD_nz_txy*BD_nx_txy*sizeof(double), hipMemcpyDeviceToHost, stream[sn]); hipMemcpyAsync(host_tyz+start_stream_half[sn]*BD_nz_tyz*BD_nx_tyz, dev_tyz[sn], offset_stream_half[sn]*BD_nz_tyz*BD_nx_tyz*sizeof(double), hipMemcpyDeviceToHost, stream[sn]); } hipDeviceSynchronize(); memcpy(txx+start_group_full*BD_nz_tpp*BD_nx_tpp, host_txx, offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double)); memcpy(tzz+start_group_full*BD_nz_tpp*BD_nx_tpp, host_tzz, offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double)); memcpy(tyy+start_group_full*BD_nz_tpp*BD_nx_tpp, host_tyy, offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double)); memcpy(txy+start_group_half*BD_nz_txy*BD_nx_txy, host_txy, offset_group_half*BD_nz_txy*BD_nx_txy*sizeof(double)); memcpy(tyz+start_group_half*BD_nz_tyz*BD_nx_tyz, host_tyz, 
offset_group_half*BD_nz_tyz*BD_nx_tyz*sizeof(double)); memcpy(txz+start_group_full*BD_nz_txz*BD_nx_txz, host_txz, offset_group_full*BD_nz_txz*BD_nx_txz*sizeof(double)); } //*************************************************// //******* openmp stress boundary *******// //*************************************************// // #pragma omp for collapse(3) for(k=0; k<BD_ny_tpp; k++) { for(i=0; i<BD_nz_tpp; i++) { for(j=2; j<ext; j++) { bound_tpp_x(txx, tyy, tzz, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp, i, j, k, vx, BD_nz_vx, BD_nx_vx, 2, lambda, mu, dx, dt, ptxxbvx, ptyybvx, ptzzbvx, bhalf, ahalf, ext, fdc); } } } // #pragma omp for collapse(3) for(k=0; k<BD_ny_txy; k++) { for(i=0; i<BD_nz_txy; i++) { for(j=1; j<ext; j++) { bound_x(txy, BD_nz_txy, BD_nx_txy, BD_ny_txy, i, j, k, vy, BD_nz_vy, BD_nx_vy, 1, (*(mu+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp))/4, (*(mu+i+(BD_nx_txy-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(BD_nx_txy-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(BD_nx_txy-j)*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(BD_nx_txy-1-j)*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp))/4, dx, dt, ptxybvy, bfull, afull, ext, fdc); } } } // // #pragma omp for collapse(3) for(k=0; k<BD_ny_txz; k++) { for(i=0; i<BD_nz_txz; i++) { for(j=1; j<ext; j++) { bound_x(txz, BD_nz_txz, BD_nx_txz, BD_ny_txz, i, j, k, vz, BD_nz_vz, BD_nx_vz, 1, (*(mu+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp))/4, (*(mu+i+(BD_nx_txz-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(BD_nx_txz-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+(BD_nx_txz-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+(BD_nx_txz-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp))/4, dx, dt, ptxzbvz, bfull, afull, ext, fdc); } } } // //************ T_Y ************// // #pragma omp for collapse(3) for(i=0; i<BD_nz_tpp; i++) { for(j=0; j<BD_nx_tpp; j++) { for(k=2; k<ext; k++) { bound_tpp_y(txx, tyy, tzz, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp, i, j, k, vy, BD_nz_vy, BD_nx_vy, 2, lambda, mu, dy, dt, ptxxbvy, ptyybvy, ptzzbvy, bhalf, ahalf, ext, fdc); } } } // // #pragma omp for collapse(3) for(i=0; i<BD_nz_txy; i++) { for(j=0; j<BD_nx_txy; j++) { for(k=1; k<ext; k++) { bound_y(txy, BD_nz_txy, BD_nx_txy, BD_ny_txy, i, j, k, vx, BD_nz_vx, BD_nx_vx, 1, (*(mu+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp))/4, (*(mu+i+j*BD_nz_tpp+(BD_ny_txy-k-1)*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+(BD_ny_txy-k-1)*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+(BD_ny_txy-k)*BD_nz_tpp*BD_nx_tpp) + *(mu+i+j*BD_nz_tpp+(BD_ny_txy-k)*BD_nz_tpp*BD_nx_tpp))/4, dy, dt, ptxybvx, bfull, afull, ext, fdc); } } } for(i=0; i<BD_nz_tyz; i++) { for(j=0; j<BD_nx_tyz; j++) { for(k=1; k<ext; k++) { bound_y(tyz, BD_nz_tyz, BD_nx_tyz, BD_ny_tyz, i, j, k, vz, BD_nz_vz, BD_nx_vz, 1, (*(mu+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp))/4, (*(mu+i+j*BD_nz_tpp+(BD_ny_tyz-1-k)*BD_nz_tpp*BD_nx_tpp) + *(mu+i+j*BD_nz_tpp+(BD_ny_tyz-k)*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+j*BD_nz_tpp+(BD_ny_tyz-k)*BD_nz_tpp*BD_nx_tpp) + 
*(mu+(i+1)+j*BD_nz_tpp+(BD_ny_tyz-1-k)*BD_nz_tpp*BD_nx_tpp))/4, dy, dt, ptyzbvz, bfull, afull, ext, fdc); } } } //************ T_Z ************// // #pragma omp for collapse(3) for(j=0; j<BD_nx_tpp; j++) { for(k=0; k<BD_ny_tpp; k++) { for(i=2; i<ext; i++) { unlimited_bound_tpp_z(txx, tyy, tzz, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp, i, j, k, vz, BD_nz_vz, BD_nx_vz, 2, lambda, mu, dz, dt, ptxxbvz, ptyybvz, ptzzbvz, bhalf, ahalf, ext, fdc); } } } // // #pragma omp for collapse(3) for(j=0; j<BD_nx_txz; j++) { for(k=0; k<BD_ny_txz; k++) { for(i=1; i<ext; i++) { unlimited_bound_z(txz, BD_nz_txz, BD_nx_txz, BD_ny_txz, i, j, k, vx, BD_nz_vx, BD_nx_vx, 1, (*(mu+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp))/4, (*(mu+(BD_nz_txz-1-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(BD_nz_txz-1-i)+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(BD_nz_txz-i)+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(BD_nz_txz-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp))/4, dz, dt, ptxzbvx, bfull, afull, ext, fdc); } } } // // #pragma omp for collapse(3) for(j=0; j<BD_nx_tyz; j++) { for(k=0; k<BD_ny_tyz; k++) { for(i=0; i<ext; i++) { unlimited_bound_z(tyz, BD_nz_tyz, BD_nx_tyz, BD_ny_tyz, i, j, k, vy, BD_nz_vy, BD_nx_vy, 1, (*(mu+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp))/4, (*(mu+(BD_nz_tyz-1-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(BD_nz_tyz-1-i)+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+(BD_nz_tyz-i)+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+(BD_nz_tyz-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp))/4, dz, dt, ptyzbvy, bfull, afull, ext, fdc); } } } } for(i=0 ; i<Max_num_stream ; i++) { hipFree(dev_vx[i]); hipFree(dev_vy[i]); hipFree(dev_vz[i]); hipFree(dev_txx[i]); hipFree(dev_tyy[i]); hipFree(dev_tzz[i]); hipFree(dev_txy[i]); hipFree(dev_tyz[i]); hipFree(dev_txz[i]); hipFree(dev_rho[i]); hipFree(dev_lambda[i]); hipFree(dev_mu[i]); hipStreamDestroy(stream[i]); } hipHostFree(host_vx); hipHostFree(host_vy); hipHostFree(host_vz); hipHostFree(host_txx); hipHostFree(host_tyy); hipHostFree(host_tzz); hipHostFree(host_txy); hipHostFree(host_tyz); hipHostFree(host_txz); hipHostFree(host_rho); hipHostFree(host_lambda); hipHostFree(host_mu); } }
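// --- Editor's hedged sketch (not part of the original file) -----------------
// ac3d_cump above appears to chunk the wavefield along the Y direction and, for
// each chunk, queues hipMemcpyAsync host-to-device copies, a kernel launch and
// device-to-host copies on per-chunk streams backed by pinned (hipHostMalloc)
// host buffers, so transfers and compute can overlap. The stripped-down sketch
// below shows only that pattern; demo_kernel, demo_streamed_update, NCHUNK_DEMO
// and CHUNK_ELEMS_DEMO are hypothetical names introduced for this illustration
// and do not appear in the original code.
#define NCHUNK_DEMO 4
#define CHUNK_ELEMS_DEMO (1 << 20)

__global__ void demo_kernel(double *buf, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) buf[i] *= 2.0;  // stand-in for the finite-difference update
}

static void demo_streamed_update(void)
{
    double *host_field = NULL;  // pinned host memory, as in ac3d_cump
    hipHostMalloc((void **)&host_field,
                  (size_t)NCHUNK_DEMO * CHUNK_ELEMS_DEMO * sizeof(double),
                  hipHostMallocDefault);

    hipStream_t stream[NCHUNK_DEMO];
    double *dev_buf[NCHUNK_DEMO];
    for (int s = 0; s < NCHUNK_DEMO; s++) {
        hipStreamCreate(&stream[s]);
        hipMalloc((void **)&dev_buf[s], CHUNK_ELEMS_DEMO * sizeof(double));
    }

    // Work queued on different streams may overlap: while one chunk is being
    // copied, another chunk's kernel can be running.
    for (int s = 0; s < NCHUNK_DEMO; s++) {
        double *chunk = host_field + (size_t)s * CHUNK_ELEMS_DEMO;
        hipMemcpyAsync(dev_buf[s], chunk, CHUNK_ELEMS_DEMO * sizeof(double),
                       hipMemcpyHostToDevice, stream[s]);
        hipLaunchKernelGGL(demo_kernel,
                           dim3((CHUNK_ELEMS_DEMO + 255) / 256), dim3(256),
                           0, stream[s],
                           dev_buf[s], CHUNK_ELEMS_DEMO);
        hipMemcpyAsync(chunk, dev_buf[s], CHUNK_ELEMS_DEMO * sizeof(double),
                       hipMemcpyDeviceToHost, stream[s]);
    }
    hipDeviceSynchronize();

    for (int s = 0; s < NCHUNK_DEMO; s++) {
        hipFree(dev_buf[s]);
        hipStreamDestroy(stream[s]);
    }
    hipHostFree(host_field);
}
// -----------------------------------------------------------------------------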
e42110cc33af22a925f8a4b579d530d232345f90.cu
#include<stdio.h> #include"fd_cuda_lib.c" extern "C"{ __global__ void kernel_v(double *dev_vx, int BD_nx_vx, int BD_nz_vx, double *dev_vy, int BD_nx_vy, int BD_nz_vy, double *dev_vz, int BD_nx_vz, int BD_nz_vz, double *dev_tpp, int BD_nx_tpp, int BD_nz_tpp, double *dev_rho, double *dev_fdc, double dx, double dy, double dz, double dt, int chunk_full, int chunk_half) { int tX = threadIdx.x + blockIdx.x*blockDim.x; int tY = threadIdx.y + blockIdx.y*blockDim.y; int tZ = threadIdx.z + blockIdx.z*blockDim.z; int tid_vx, tid_vy, tid_vz; int tid_tpp_0, tid_tpp_1, tid_tpp_2, tid_tpp_3; int tid_rho_0, tid_rho_1; double tmp, tmp_rho; //***************** vx ********************// tid_vx = tZ + tX*BD_nz_vx + tY*BD_nx_vx*BD_nz_vx; tid_rho_0 = tZ + (tX+0)*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_rho_1 = tZ + (tX+1)*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; //***************** vx by txx ********************// if(tX < BD_nx_vx-1 && tX > 0 && tY < chunk_full && tZ < BD_nz_vx) { tmp_rho = (0.5*(*(dev_rho + tid_rho_0) + *(dev_rho + tid_rho_1)))/dt; tid_tpp_0 = tZ + (tX-1)*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_tpp_1 = tZ + (tX+0)*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_tpp_2 = tZ + (tX+1)*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_tpp_3 = tZ + (tX+2)*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; *(dev_vx+tid_vx) = *(dev_vx+tid_vx) + ((*(dev_txx+tid_tpp_0)* *(dev_fdc) + *(dev_txx+tid_tpp_1)* *(dev_fdc+1) + *(dev_txx+tid_tpp_2)* *(dev_fdc+2) + *(dev_txx+tid_tpp_3)* *(dev_fdc+3)) / dx)/ tmp_rho; } //***************** vy ********************// tid_vy = tZ + tX*BD_nz_vy + tY*BD_nx_vy*BD_nz_vy; tid_rho_0 = tZ + tX*BD_nz_tpp + (tY+0)*BD_nx_tpp*BD_nz_tpp; tid_rho_1 = tZ + tX*BD_nz_tpp + (tY+1)*BD_nx_tpp*BD_nz_tpp; //***************** vy by tyy********************// if(tX < BD_nx_vy && tY < chunk_half && tZ < BD_nz_vy) { tmp_rho = (0.5*(*(dev_rho + tid_rho_0) + *(dev_rho + tid_rho_1)))/dt; tid_tpp_0 = tZ + tX*BD_nz_tpp + (tY+0)*BD_nx_tpp*BD_nz_tpp; tid_tpp_1 = tZ + tX*BD_nz_tpp + (tY+1)*BD_nx_tpp*BD_nz_tpp; tid_tpp_2 = tZ + tX*BD_nz_tpp + (tY+2)*BD_nx_tpp*BD_nz_tpp; tid_tpp_3 = tZ + tX*BD_nz_tpp + (tY+3)*BD_nx_tpp*BD_nz_tpp; *(dev_vy+tid_vy) = *(dev_vy+tid_vy) + ((*(dev_tyy+tid_tpp_0)* *(dev_fdc) + *(dev_tyy+tid_tpp_1)* *(dev_fdc+1) + *(dev_tyy+tid_tpp_2)* *(dev_fdc+2) + *(dev_tyy+tid_tpp_3)* *(dev_fdc+3)) / dy) / tmp_rho; } //***************** vz ********************// tid_vz = tZ + tX*BD_nz_vz + tY*BD_nx_vz*BD_nz_vz; tid_rho_0 = (tZ+0) + tX*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_rho_1 = (tZ+1) + tX*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; //***************** vz by tzz ********************// if(tX < BD_nx_vz && tY < chunk_full && tZ < BD_nz_vz-1 && tZ > 0) { tmp_rho = (0.5*(*(dev_rho + tid_rho_0) + *(dev_rho + tid_rho_1)))/dt; tid_tpp_0 = (tZ-1) + tX*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_tpp_1 = (tZ+0) + tX*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_tpp_2 = (tZ+1) + tX*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tid_tpp_3 = (tZ+2) + tX*BD_nz_tpp + tY*BD_nx_tpp*BD_nz_tpp; tmp = ((*(dev_tzz+tid_tpp_0)* *(dev_fdc) + *(dev_tzz+tid_tpp_1)* *(dev_fdc+1) + *(dev_tzz+tid_tpp_2)* *(dev_fdc+2) + *(dev_tzz+tid_tpp_3)* *(dev_fdc+3)) / dz) / tmp_rho; *(dev_vz+tid_vz) = *(dev_vz+tid_vz) + tmp; } } __global__ void kernel_tau(double *dev_vx, int BD_nx_vx, int BD_nz_vx, double *dev_vy, int BD_nx_vy, int BD_nz_vy, double *dev_vz, int BD_nx_vz, int BD_nz_vz, double *dev_tpp, int BD_nx_tpp, int BD_nz_tpp, double *dev_lambda, double *dev_fdc, double dx, double dy, double dz, double dt, int chunk_full, int chunk_half) { int tX = threadIdx.x + blockIdx.x*blockDim.x; int 
tY = threadIdx.y + blockIdx.y*blockDim.y; int tZ = threadIdx.z + blockIdx.z*blockDim.z; int tid_tpp, tid_lambda_tpp; int tid_vx_0, tid_vx_1, tid_vx_2, tid_vx_3; int tid_vy_0, tid_vy_1, tid_vy_2, tid_vy_3; int tid_vz_0, tid_vz_1, tid_vz_2, tid_vz_3; double tmp_vx, tmp_vy, tmp_vz; //**************** tau_pp ********************// tid_tpp = tZ + tX*BD_nz_tpp + tY*BD_nz_tpp*BD_nx_tpp; tid_lambda_tpp = tZ + tX*BD_nz_tpp + tY*BD_nz_tpp*BD_nx_tpp; //**************** tau_pp by vx ********************// if(tX > 1 && tX < BD_nx_tpp-2 && tY < chunk_full && tZ < BD_nz_tpp) { tid_vx_0 = tZ + (tX-2)*BD_nz_vx + (tY+2)*BD_nz_vx*BD_nx_vx; tid_vx_1 = tZ + (tX-1)*BD_nz_vx + (tY+2)*BD_nz_vx*BD_nx_vx; tid_vx_2 = tZ + (tX+0)*BD_nz_vx + (tY+2)*BD_nz_vx*BD_nx_vx; tid_vx_3 = tZ + (tX+1)*BD_nz_vx + (tY+2)*BD_nz_vx*BD_nx_vx; tmp_vx = ((*(dev_vx+tid_vx_0)* *(dev_fdc) + *(dev_vx+tid_vx_1)* *(dev_fdc+1) + *(dev_vx+tid_vx_2)* *(dev_fdc+2) + *(dev_vx+tid_vx_3)* *(dev_fdc+3)) / dx)*dt; *(dev_tpp+tid_tpp) = *(dev_tpp+tid_tpp) + *(dev_lambda+tid_lambda_tpp) * tmp_vx; } //**************** tau_pp by vy ********************// if(tX < BD_nx_tpp && tY < chunk_full && tZ < BD_nz_tpp) { tid_vy_0 = tZ + tX*BD_nz_vy + (tY+0)*BD_nz_vy*BD_nx_vy; tid_vy_1 = tZ + tX*BD_nz_vy + (tY+1)*BD_nz_vy*BD_nx_vy; tid_vy_2 = tZ + tX*BD_nz_vy + (tY+2)*BD_nz_vy*BD_nx_vy; tid_vy_3 = tZ + tX*BD_nz_vy + (tY+3)*BD_nz_vy*BD_nx_vy; tmp_vy = ((*(dev_vy+tid_vy_0)* *(dev_fdc) + *(dev_vy+tid_vy_1)* *(dev_fdc+1) + *(dev_vy+tid_vy_2)* *(dev_fdc+2) + *(dev_vy+tid_vy_3)* *(dev_fdc+3)) / dy)*dt; *(dev_tpp+tid_tpp) = *(dev_tpp+tid_tpp) + *(dev_lambda+tid_lambda_tpp) * tmp_vy; } //**************** tau_pp by vz ********************// if(tX < BD_nx_tpp && tY < chunk_full && tZ > 1 && tZ < BD_nz_tpp-2) { tid_vz_0 = (tZ-2) + tX*BD_nz_vz + (tY+2)*BD_nz_vz*BD_nx_vz; tid_vz_1 = (tZ-1) + tX*BD_nz_vz + (tY+2)*BD_nz_vz*BD_nx_vz; tid_vz_2 = (tZ+0) + tX*BD_nz_vz + (tY+2)*BD_nz_vz*BD_nx_vz; tid_vz_3 = (tZ+1) + tX*BD_nz_vz + (tY+2)*BD_nz_vz*BD_nx_vz; tmp_vz = ((*(dev_vz+tid_vz_0)* *(dev_fdc) + *(dev_vz+tid_vz_1)* *(dev_fdc+1) + *(dev_vz+tid_vz_2)* *(dev_fdc+2) + *(dev_vz+tid_vz_3)* *(dev_fdc+3)) / dz)*dt; *(dev_tpp+tid_tpp) = *(dev_tpp+tid_tpp) + *(dev_lambda+tid_lambda_tpp) * tmp_vz; } } //double *fdc/ //double *intvl: dz, dx, dt //double *modelsize : BDnDZ, BDnHX, nDZ, nHX void ac3d_cump(double *vx, int BD_nx_vx, int BD_ny_vx, int BD_nz_vx, double *pvxbtpp, double *vy, int BD_nx_vy, int BD_ny_vy, int BD_nz_vy, double *pvybtpp, double *vz, int BD_nx_vz, int BD_ny_vz, int BD_nz_vz, double *pvzbtpp, double *tpp, double *ptppbvx, double *ptppbvy, double *ptppbvz, int BD_nx_tpp, int BD_ny_tpp, int BD_nz_tpp, double *rho, double *lambda, double *fdc, double dt, double dx, double dy, double dz, int ext, double *bhalf, double *ahalf, double *bfull, double *afull, long int *chk_group_full, long int *chk_group_half, long int Num_group, long int Max_group_dim, long int *chk_stream_full, long int *chk_stream_half, long int Max_num_stream, long int Max_stream_dim, long int *threadim, long int *blockdim) { int i,j,k,t; //************************************************// //**************** GPU setting *******************// //************************************************// int gn,sn; // gn : group number; sn : stream number cudaStream_t stream[Max_num_stream]; // create streams for GPU double *dev_fdc; cudaMalloc((void**)&dev_fdc, 4*sizeof(double)); // copy fdc to device cudaMemcpy(dev_fdc,fdc,4*sizeof(double),cudaMemcpyHostToDevice); for(sn=0 ; sn < Max_num_stream ; sn++){ 
    cudaStreamCreate(&stream[sn]); // create concurrent streams
    }

    // number of threads and blocks used
    dim3 threads(*threadim,*(threadim+1),*(threadim+2));
    dim3 blocks(*blockdim,*(blockdim+1),*(blockdim+2));

    // pinned host memory for faster transfer between host and device mem
    double *host_vx, *host_vy, *host_vz;
    double *host_tpp, *host_rho, *host_lambda;
    cudaHostAlloc((void**)&host_vx, (Max_group_dim+3)*BD_nx_vx*BD_nz_vx*sizeof(double), cudaHostAllocDefault);
    cudaHostAlloc((void**)&host_vy, (Max_group_dim+3)*BD_nx_vy*BD_nz_vy*sizeof(double), cudaHostAllocDefault);
    cudaHostAlloc((void**)&host_vz, (Max_group_dim+3)*BD_nx_vz*BD_nz_vz*sizeof(double), cudaHostAllocDefault);
    cudaHostAlloc((void**)&host_tpp, Max_group_dim*BD_nx_tpp*BD_nz_tpp*sizeof(double), cudaHostAllocDefault);
    cudaHostAlloc((void**)&host_rho, (Max_group_dim+1)*BD_nx_tpp*BD_nz_tpp*sizeof(double), cudaHostAllocDefault);
    cudaHostAlloc((void**)&host_lambda, Max_group_dim*BD_nx_tpp*BD_nz_tpp*sizeof(double), cudaHostAllocDefault);

    // device memory for each stream
    double *dev_vx[Max_num_stream], *dev_vy[Max_num_stream], *dev_vz[Max_num_stream];
    double *dev_tpp[Max_num_stream];
    double *dev_rho[Max_num_stream], *dev_lambda[Max_num_stream];
    for(i=0 ; i<Max_num_stream ; i++){
        cudaMalloc((void**)&dev_vx[i], (Max_stream_dim+3)*BD_nx_vx*BD_nz_vx*sizeof(double));
        cudaMalloc((void**)&dev_vy[i], (Max_stream_dim+3)*BD_nx_vy*BD_nz_vy*sizeof(double));
        cudaMalloc((void**)&dev_vz[i], (Max_stream_dim+3)*BD_nx_vz*BD_nz_vz*sizeof(double));
        cudaMalloc((void**)&dev_tpp[i], Max_stream_dim*BD_nx_tpp*BD_nz_tpp*sizeof(double));
        cudaMalloc((void**)&dev_rho[i], (Max_stream_dim+1)*BD_nx_tpp*BD_nz_tpp*sizeof(double));
        cudaMalloc((void**)&dev_lambda[i], Max_stream_dim*BD_nx_tpp*BD_nz_tpp*sizeof(double));
    }

    int start_group_full = 0;
    int start_group_half = 0;
    int offset_group_full;
    int offset_group_half;
    int start_stream_full[Max_num_stream];
    int start_stream_half[Max_num_stream];
    int offset_stream_full[Max_num_stream];
    int offset_stream_half[Max_num_stream];
    for(gn=0 ; gn<Num_group; gn++)
    {
        start_stream_full[0] = 0;
        start_stream_half[0] = 0;
        for(sn=0;sn<Max_num_stream;sn++)
        {
            offset_stream_full[sn] = *(chk_stream_full+gn*(Max_num_stream+2)+sn+2);
            offset_stream_half[sn] = *(chk_stream_half+gn*(Max_num_stream+2)+sn+2);
        }
        for(sn=1;sn<Max_num_stream;sn++)
        {
            start_stream_full[sn] = start_stream_full[sn-1] + offset_stream_full[sn-1];
            start_stream_half[sn] = start_stream_half[sn-1] + offset_stream_half[sn-1];
        }
    }

    //************ time iteration ************//
    for(t=0;t<1;t++)
    {
        // *****************************************************************//
        // ********************** GPU particle velocities ******************//
        // *****************************************************************//
        start_group_full = 2;
        start_group_half = 1;
        for(gn=0 ; gn<Num_group; gn++)
        {
            start_group_full = start_group_full + *(chk_group_full+gn);
            start_group_half = start_group_half + *(chk_group_half+gn);
            offset_group_full = *(chk_group_full+gn+1);
            offset_group_half = *(chk_group_half+gn+1);

            // vx, vy, vz, tpp, rho
            memcpy(host_vx, vx+start_group_full*BD_nz_vx*BD_nx_vx, offset_group_full*BD_nz_vx*BD_nx_vx*sizeof(double));
            memcpy(host_vy, vy+start_group_half*BD_nz_vy*BD_nx_vy, offset_group_half*BD_nz_vy*BD_nx_vy*sizeof(double));
            memcpy(host_vz, vz+start_group_full*BD_nz_vz*BD_nx_vz, offset_group_full*BD_nz_vz*BD_nx_vz*sizeof(double));
            memcpy(host_tpp, tpp+start_group_full*BD_nz_tpp*BD_nx_tpp, offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double));
            memcpy(host_rho,
rho+start_group_half*BD_nz_tpp*BD_nx_tpp, (offset_group_half+1)*BD_nz_tpp*BD_nx_tpp*sizeof(double)); // copy host memory data to device memory() for(sn=0 ; sn<*(chk_stream_full+gn*(Max_num_stream+2)); sn++) { cudaMemcpyAsync(dev_vx[sn], host_vx+start_stream_full[sn]*BD_nz_vx*BD_nx_vx, offset_stream_full[sn]*BD_nz_vx*BD_nx_vx*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_vz[sn], host_vz+start_stream_full[sn]*BD_nz_vz*BD_nx_vz, offset_stream_full[sn]*BD_nz_vz*BD_nx_vz*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_txx[sn], host_txx+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_tzz[sn], host_tzz+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_txz[sn], host_txz+start_stream_full[sn]*BD_nz_txz*BD_nx_txz, offset_stream_full[sn]*BD_nz_txz*BD_nx_txz*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_txy[sn], host_txy+start_stream_full[sn]*BD_nz_txy*BD_nx_txy, (offset_stream_full[sn]+3)*BD_nz_txy*BD_nx_txy*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_tyz[sn], host_tyz+start_stream_full[sn]*BD_nz_tyz*BD_nx_tyz, (offset_stream_full[sn]+3)*BD_nz_tyz*BD_nx_tyz*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_vy[sn], host_vy+start_stream_half[sn]*BD_nz_vy*BD_nx_vy, offset_stream_half[sn]*BD_nz_vy*BD_nx_vy*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_tyy[sn], host_tyy+start_stream_half[sn]*BD_nz_tpp*BD_nx_tpp, (offset_stream_half[sn]+3)*BD_nz_tpp*BD_nx_tpp*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_rho[sn], host_rho+start_stream_half[sn]*BD_nz_tpp*BD_nx_tpp, (offset_stream_half[sn]+1)*BD_nz_tpp*BD_nx_tpp*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); } // if(err!= cudaSuccess){printf("%s\n","error");} for(sn=0 ; sn<*(chk_stream_full+gn*(Max_num_stream+2)); sn++) { kernel_v<<<blocks,threads,0,stream[sn]>>> (dev_vx[sn], BD_nx_vx, BD_nz_vx, dev_vy[sn], BD_nx_vy, BD_nz_vy, dev_vz[sn], BD_nx_vz, BD_nz_vz, dev_txx[sn], dev_tyy[sn], dev_tzz[sn], BD_nx_tpp, BD_nz_tpp, dev_txy[sn], BD_nx_txy, BD_nz_txy, dev_tyz[sn], BD_nx_tyz, BD_nz_tyz, dev_txz[sn], BD_nx_txz, BD_nz_txz, dev_rho[sn], dev_fdc, dx, dy, dz, dt, offset_stream_full[sn], offset_stream_half[sn]); } for(sn=0 ; sn<*(chk_stream_full+gn*(Max_num_stream+2)); sn++) { cudaMemcpyAsync(host_vx+start_stream_full[sn]*BD_nz_vx*BD_nx_vx, dev_vx[sn], offset_stream_full[sn]*BD_nz_vx*BD_nx_vx*sizeof(double), cudaMemcpyDeviceToHost, stream[sn]); cudaMemcpyAsync(host_vz+start_stream_full[sn]*BD_nz_vz*BD_nx_vz, dev_vz[sn], offset_stream_full[sn]*BD_nz_vz*BD_nx_vz*sizeof(double), cudaMemcpyDeviceToHost, stream[sn]); cudaMemcpyAsync(host_vy+start_stream_half[sn]*BD_nz_vy*BD_nx_vy, dev_vy[sn], offset_stream_half[sn]*BD_nz_vy*BD_nx_vy*sizeof(double), cudaMemcpyDeviceToHost, stream[sn]); cudaMemcpyAsync(host_txx+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, dev_txx[sn], offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), cudaMemcpyDeviceToHost, stream[sn]); } cudaDeviceSynchronize(); memcpy(vx+start_group_full*BD_nz_vx*BD_nx_vx, host_vx, offset_group_full*BD_nz_vx*BD_nx_vx*sizeof(double)); memcpy(vy+start_group_half*BD_nz_vy*BD_nx_vy, host_vy, offset_group_half*BD_nz_vy*BD_nx_vy*sizeof(double)); memcpy(vz+start_group_full*BD_nz_vz*BD_nx_vz, host_vz, 
offset_group_full*BD_nz_vz*BD_nx_vz*sizeof(double)); memcpy(txx+start_group_full*BD_nz_tpp*BD_nx_tpp, host_txx, offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double)); } // *************************************************// // ******* openmp particle velocity boundary *******// // *************************************************// // vxbtxx for(k=0; k<BD_ny_vx; k++) { for(i=0; i<BD_nz_vx; i++) { for(j=1; j<ext; j++) { bound_x(vx, BD_nz_vx, BD_nx_vx, BD_ny_vx, i, j, k, txx, BD_nz_tpp, BD_nx_tpp, 1, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+i+(BD_nx_vx-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(BD_nx_vx-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), dx, dt, pvxbtxx, bhalf, ahalf, ext, fdc); } } } // vybtxy for(k=0; k<BD_ny_vy; k++) { for(i=0; i<BD_nz_vy; i++) { for(j=2; j<ext; j++) { bound_x(vy, BD_nz_vy, BD_nx_vy, BD_ny_vy, i, j, k, txy, BD_nz_txy, BD_nx_txy, 2, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+i+(BD_nx_vy-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(BD_nx_vy-1-j)*BD_nz_tpp+(BD_ny_vy-k)*BD_nz_tpp*BD_nx_tpp)), dx, dt, pvybtxy, bfull, afull, ext, fdc); } } } // vzbtxz for(k=0; k<BD_ny_vz; k++) { for(i=0; i<BD_nz_vz; i++) { for(j=2; j<ext; j++) { bound_x(vz, BD_nz_vz, BD_nx_vz, BD_ny_vz, i, j, k, txz, BD_nz_txz, BD_nx_txz, 2, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+i+(BD_nx_vz-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(i+1)+(BD_nx_vz-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), dx,dt, pvzbtxz, bfull, afull, ext, fdc); } } } //********************* V_Y *********************// // vxbtxy for(j=0; j<BD_nx_vx; j++) { for(i=0; i<BD_nz_vx; i++) { for(k=2; k<ext; k++) { bound_y(vx, BD_nz_vx, BD_nx_vx, BD_ny_vx, i, j, k, txy, BD_nz_txy, BD_nx_txy, 2, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+i+j*BD_nz_tpp+(BD_ny_vx-1-k)*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(j+1)*BD_nz_tpp+(BD_ny_vx-1-k)*BD_nz_tpp*BD_nx_tpp)), dy,dt, pvxbtxy, bfull, afull, ext, fdc); } } } // vybtyy for(j=0; j<BD_nx_vy; j++) { for(i=0; i<BD_nz_vy; i++) { for(k=1; k<ext; k++) { bound_y(vy, BD_nz_vy, BD_nx_vy, BD_ny_vy, i, j, k, tyy, BD_nz_tpp, BD_nx_tpp, 1, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+i+j*BD_nz_tpp+(BD_ny_vy-1-k)*BD_nz_tpp*BD_nx_tpp)+*(rho+i+j*BD_nz_tpp+(BD_ny_vy-k)*BD_nz_tpp*BD_nx_tpp)), dy,dt, pvybtyy, bhalf, ahalf, ext, fdc); } } } // vzbtyz for(j=0; j<BD_nx_vz; j++) { for(i=0; i<BD_nz_vz; i++) { for(k=2; k<ext; k++) { bound_y(vz, BD_nz_vz, BD_nx_vz, BD_ny_vz, i, j, k, tyz, BD_nz_tyz, BD_nx_tyz, 2, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+i+j*BD_nz_tpp+(BD_ny_vz-1-k)*BD_nz_tpp*BD_nx_tpp)+*(rho+(i+1)+j*BD_nz_tpp+(BD_ny_vz-1-k)*BD_nz_tpp*BD_nx_tpp)), dy,dt, pvzbtyz, bfull, afull, ext, fdc); } } } //********************* V_Z *********************// // vxbtxz for(j=0; j<BD_nx_vx; j++) { for(k=0; k<BD_ny_vx; k++) { for(i=2; i<ext; i++) { unlimited_bound_z(vx, BD_nz_vx, BD_nx_vx, BD_ny_vx, i, j, k, txz, BD_nz_txz, BD_nx_txz, 2, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+(BD_nz_vx-1-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(BD_nz_vx-1-i)+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), dz,dt, pvxbtxz, bfull, afull, ext, fdc); } } } // vybtyz for(j=0; j<BD_nx_vy; j++) { for(k=0; 
k<BD_ny_vy; k++) { for(i=2; i<ext; i++) { unlimited_bound_z(vy, BD_nz_vy, BD_nx_vy, BD_ny_vy, i, j, k, tyz, BD_nz_tyz, BD_nx_tyz, 2, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+(BD_nz_vy-1-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(BD_nz_vy-1-i)+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp)), dz,dt, pvybtyz, bfull, afull, ext, fdc); } } } // vzbtzz for(j=0; j<BD_nx_vz; j++) { for(k=0; k<BD_ny_vz; k++) { for(i=1;i<ext;i++) { unlimited_bound_z(vz, BD_nz_vz, BD_nx_vz, BD_ny_vz, i, j, k, tzz, BD_nz_tpp, BD_nx_tpp, 1, 2.0/(*(rho+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), 2.0/(*(rho+(BD_nz_vz-1-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)+*(rho+(BD_nz_vz-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp)), dz,dt, pvzbtzz, bhalf, ahalf, ext, fdc); } } } //*****************************************************************// //**************************** GPU stress *************************// //*****************************************************************// start_group_full = 2; start_group_half = 1; for(gn=0 ; gn<Num_group; gn++) { start_group_full = start_group_full + *(chk_group_full+gn); start_group_half = start_group_half + *(chk_group_half+gn); offset_group_full = *(chk_group_full+gn+1); offset_group_half = *(chk_group_half+gn+1); // vx, vz, txx, tzz, txz memcpy(host_txx, txx+start_group_full*BD_nz_tpp*BD_nx_tpp, offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double)); memcpy(host_tzz, tzz+start_group_full*BD_nz_tpp*BD_nx_tpp, offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double)); memcpy(host_tyy, tyy+start_group_full*BD_nz_tpp*BD_nx_tpp, offset_group_half*BD_nz_tpp*BD_nx_tpp*sizeof(double)); memcpy(host_txy, txy+start_group_half*BD_nz_txy*BD_nx_txy, offset_group_half*BD_nz_txy*BD_nx_txy*sizeof(double)); memcpy(host_tyz, tyz+start_group_half*BD_nz_tyz*BD_nx_tyz, offset_group_half*BD_nz_tyz*BD_nx_tyz*sizeof(double)); memcpy(host_txz, txz+start_group_full*BD_nz_txz*BD_nx_txz, offset_group_full*BD_nz_txz*BD_nx_txz*sizeof(double)); memcpy(host_vx, vx+(start_group_half-1)*BD_nz_vx*BD_nx_vx, (offset_group_half+3)*BD_nz_vx*BD_nx_vx*sizeof(double)); memcpy(host_vy, vy+(start_group_full-2)*BD_nz_vy*BD_nx_vy, (offset_group_full+3)*BD_nz_vy*BD_nx_vy*sizeof(double)); memcpy(host_vz, vz+(start_group_half-1)*BD_nz_vz*BD_nx_vz, (offset_group_half+3)*BD_nz_vz*BD_nx_vz*sizeof(double)); memcpy(host_lambda, lambda+start_group_full*BD_nz_tpp*BD_nx_tpp, offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double)); memcpy(host_mu, mu+start_group_half*BD_nz_tpp*BD_nx_tpp, (offset_group_half+1)*BD_nz_tpp*BD_nx_tpp*sizeof(double)); // copy host memory data to device memory() for(sn=0 ; sn<*(chk_stream_full+gn*(Max_num_stream+2)); sn++) { cudaMemcpyAsync(dev_txx[sn], host_txx+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_tzz[sn], host_tzz+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_tyy[sn], host_tyy+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_txz[sn], host_txz+start_stream_full[sn]*BD_nz_txz*BD_nx_txz, offset_stream_full[sn]*BD_nz_txz*BD_nx_txz*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_txy[sn], host_txy+start_stream_half[sn]*BD_nz_txy*BD_nx_txy, 
offset_stream_half[sn]*BD_nz_txy*BD_nx_txy*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_tyz[sn], host_tyz+start_stream_half[sn]*BD_nz_tyz*BD_nx_tyz, offset_stream_half[sn]*BD_nz_tyz*BD_nx_tyz*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_vx[sn], host_vx+start_stream_half[sn]*BD_nz_vx*BD_nx_vx, (offset_stream_half[sn]+3)*BD_nz_vx*BD_nx_vx*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_vz[sn], host_vz+start_stream_half[sn]*BD_nz_vz*BD_nx_vz, (offset_stream_half[sn]+3)*BD_nz_vz*BD_nx_vz*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_vy[sn], host_vy+start_stream_full[sn]*BD_nz_vy*BD_nx_vy, (offset_stream_full[sn]+3)*BD_nz_vy*BD_nx_vy*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_lambda[sn], host_lambda+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); cudaMemcpyAsync(dev_mu[sn], host_mu+start_stream_half[sn]*BD_nz_tpp*BD_nx_tpp, (offset_stream_half[sn]+1)*BD_nz_tpp*BD_nx_tpp*sizeof(double), cudaMemcpyHostToDevice, stream[sn]); } for(sn=0 ; sn<*(chk_stream_full+gn*(Max_num_stream+2)); sn++) { kernel_tau<<<blocks,threads,0,stream[sn]>>> (dev_vx[sn], BD_nx_vx, BD_nz_vx, dev_vy[sn], BD_nx_vy, BD_nz_vy, dev_vz[sn], BD_nx_vz, BD_nz_vz, dev_txx[sn], dev_tyy[sn], dev_tzz[sn], BD_nx_tpp, BD_nz_tpp, dev_txy[sn], BD_nx_txy, BD_nz_txy, dev_tyz[sn], BD_nx_tyz, BD_nz_tyz, dev_txz[sn], BD_nx_txz, BD_nz_txz, dev_lambda[sn], dev_mu[sn], dev_fdc, dx, dy, dz, dt, offset_stream_full[sn], offset_stream_half[sn]); } for(sn=0 ; sn<*(chk_stream_full+gn*(Max_num_stream+2)); sn++) { cudaMemcpyAsync(host_txx+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, dev_txx[sn], offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), cudaMemcpyDeviceToHost, stream[sn]); cudaMemcpyAsync(host_tyy+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, dev_tyy[sn], offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), cudaMemcpyDeviceToHost, stream[sn]); cudaMemcpyAsync(host_tzz+start_stream_full[sn]*BD_nz_tpp*BD_nx_tpp, dev_tzz[sn], offset_stream_full[sn]*BD_nz_tpp*BD_nx_tpp*sizeof(double), cudaMemcpyDeviceToHost, stream[sn]); cudaMemcpyAsync(host_txz+start_stream_full[sn]*BD_nz_txz*BD_nx_txz, dev_txz[sn], offset_stream_full[sn]*BD_nz_txz*BD_nx_txz*sizeof(double), cudaMemcpyDeviceToHost, stream[sn]); cudaMemcpyAsync(host_txy+start_stream_half[sn]*BD_nz_txy*BD_nx_txy, dev_txy[sn], offset_stream_half[sn]*BD_nz_txy*BD_nx_txy*sizeof(double), cudaMemcpyDeviceToHost, stream[sn]); cudaMemcpyAsync(host_tyz+start_stream_half[sn]*BD_nz_tyz*BD_nx_tyz, dev_tyz[sn], offset_stream_half[sn]*BD_nz_tyz*BD_nx_tyz*sizeof(double), cudaMemcpyDeviceToHost, stream[sn]); } cudaDeviceSynchronize(); memcpy(txx+start_group_full*BD_nz_tpp*BD_nx_tpp, host_txx, offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double)); memcpy(tzz+start_group_full*BD_nz_tpp*BD_nx_tpp, host_tzz, offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double)); memcpy(tyy+start_group_full*BD_nz_tpp*BD_nx_tpp, host_tyy, offset_group_full*BD_nz_tpp*BD_nx_tpp*sizeof(double)); memcpy(txy+start_group_half*BD_nz_txy*BD_nx_txy, host_txy, offset_group_half*BD_nz_txy*BD_nx_txy*sizeof(double)); memcpy(tyz+start_group_half*BD_nz_tyz*BD_nx_tyz, host_tyz, offset_group_half*BD_nz_tyz*BD_nx_tyz*sizeof(double)); memcpy(txz+start_group_full*BD_nz_txz*BD_nx_txz, host_txz, offset_group_full*BD_nz_txz*BD_nx_txz*sizeof(double)); } //*************************************************// //******* openmp stress 
boundary *******// //*************************************************// // #pragma omp for collapse(3) for(k=0; k<BD_ny_tpp; k++) { for(i=0; i<BD_nz_tpp; i++) { for(j=2; j<ext; j++) { bound_tpp_x(txx, tyy, tzz, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp, i, j, k, vx, BD_nz_vx, BD_nx_vx, 2, lambda, mu, dx, dt, ptxxbvx, ptyybvx, ptzzbvx, bhalf, ahalf, ext, fdc); } } } // #pragma omp for collapse(3) for(k=0; k<BD_ny_txy; k++) { for(i=0; i<BD_nz_txy; i++) { for(j=1; j<ext; j++) { bound_x(txy, BD_nz_txy, BD_nx_txy, BD_ny_txy, i, j, k, vy, BD_nz_vy, BD_nx_vy, 1, (*(mu+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp))/4, (*(mu+i+(BD_nx_txy-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(BD_nx_txy-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(BD_nx_txy-j)*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(BD_nx_txy-1-j)*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp))/4, dx, dt, ptxybvy, bfull, afull, ext, fdc); } } } // // #pragma omp for collapse(3) for(k=0; k<BD_ny_txz; k++) { for(i=0; i<BD_nz_txz; i++) { for(j=1; j<ext; j++) { bound_x(txz, BD_nz_txz, BD_nx_txz, BD_ny_txz, i, j, k, vz, BD_nz_vz, BD_nx_vz, 1, (*(mu+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp))/4, (*(mu+i+(BD_nx_txz-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(BD_nx_txz-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+(BD_nx_txz-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+(BD_nx_txz-1-j)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp))/4, dx, dt, ptxzbvz, bfull, afull, ext, fdc); } } } // //************ T_Y ************// // #pragma omp for collapse(3) for(i=0; i<BD_nz_tpp; i++) { for(j=0; j<BD_nx_tpp; j++) { for(k=2; k<ext; k++) { bound_tpp_y(txx, tyy, tzz, BD_nz_tpp, BD_nx_tpp, BD_ny_tpp, i, j, k, vy, BD_nz_vy, BD_nx_vy, 2, lambda, mu, dy, dt, ptxxbvy, ptyybvy, ptzzbvy, bhalf, ahalf, ext, fdc); } } } // // #pragma omp for collapse(3) for(i=0; i<BD_nz_txy; i++) { for(j=0; j<BD_nx_txy; j++) { for(k=1; k<ext; k++) { bound_y(txy, BD_nz_txy, BD_nx_txy, BD_ny_txy, i, j, k, vx, BD_nz_vx, BD_nx_vx, 1, (*(mu+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp))/4, (*(mu+i+j*BD_nz_tpp+(BD_ny_txy-k-1)*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+(BD_ny_txy-k-1)*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+(BD_ny_txy-k)*BD_nz_tpp*BD_nx_tpp) + *(mu+i+j*BD_nz_tpp+(BD_ny_txy-k)*BD_nz_tpp*BD_nx_tpp))/4, dy, dt, ptxybvx, bfull, afull, ext, fdc); } } } for(i=0; i<BD_nz_tyz; i++) { for(j=0; j<BD_nx_tyz; j++) { for(k=1; k<ext; k++) { bound_y(tyz, BD_nz_tyz, BD_nx_tyz, BD_ny_tyz, i, j, k, vz, BD_nz_vz, BD_nx_vz, 1, (*(mu+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp))/4, (*(mu+i+j*BD_nz_tpp+(BD_ny_tyz-1-k)*BD_nz_tpp*BD_nx_tpp) + *(mu+i+j*BD_nz_tpp+(BD_ny_tyz-k)*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+j*BD_nz_tpp+(BD_ny_tyz-k)*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+j*BD_nz_tpp+(BD_ny_tyz-1-k)*BD_nz_tpp*BD_nx_tpp))/4, dy, dt, ptyzbvz, bfull, afull, ext, fdc); } } } //************ T_Z ************// // #pragma omp for collapse(3) for(j=0; j<BD_nx_tpp; j++) { for(k=0; k<BD_ny_tpp; k++) { for(i=2; i<ext; i++) { unlimited_bound_tpp_z(txx, tyy, tzz, BD_nz_tpp, 
BD_nx_tpp, BD_ny_tpp, i, j, k, vz, BD_nz_vz, BD_nx_vz, 2, lambda, mu, dz, dt, ptxxbvz, ptyybvz, ptzzbvz, bhalf, ahalf, ext, fdc); } } } // // #pragma omp for collapse(3) for(j=0; j<BD_nx_txz; j++) { for(k=0; k<BD_ny_txz; k++) { for(i=1; i<ext; i++) { unlimited_bound_z(txz, BD_nz_txz, BD_nx_txz, BD_ny_txz, i, j, k, vx, BD_nz_vx, BD_nx_vx, 1, (*(mu+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp))/4, (*(mu+(BD_nz_txz-1-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(BD_nz_txz-1-i)+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(BD_nz_txz-i)+(j+1)*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(BD_nz_txz-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp))/4, dz, dt, ptxzbvx, bfull, afull, ext, fdc); } } } // // #pragma omp for collapse(3) for(j=0; j<BD_nx_tyz; j++) { for(k=0; k<BD_ny_tyz; k++) { for(i=0; i<ext; i++) { unlimited_bound_z(tyz, BD_nz_tyz, BD_nx_tyz, BD_ny_tyz, i, j, k, vy, BD_nz_vy, BD_nx_vy, 1, (*(mu+i+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+i+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+(i+1)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp))/4, (*(mu+(BD_nz_tyz-1-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp) + *(mu+(BD_nz_tyz-1-i)+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+(BD_nz_tyz-i)+j*BD_nz_tpp+(k+1)*BD_nz_tpp*BD_nx_tpp) + *(mu+(BD_nz_tyz-i)+j*BD_nz_tpp+k*BD_nz_tpp*BD_nx_tpp))/4, dz, dt, ptyzbvy, bfull, afull, ext, fdc); } } } } for(i=0 ; i<Max_num_stream ; i++) { cudaFree(dev_vx[i]); cudaFree(dev_vy[i]); cudaFree(dev_vz[i]); cudaFree(dev_txx[i]); cudaFree(dev_tyy[i]); cudaFree(dev_tzz[i]); cudaFree(dev_txy[i]); cudaFree(dev_tyz[i]); cudaFree(dev_txz[i]); cudaFree(dev_rho[i]); cudaFree(dev_lambda[i]); cudaFree(dev_mu[i]); cudaStreamDestroy(stream[i]); } cudaFreeHost(host_vx); cudaFreeHost(host_vy); cudaFreeHost(host_vz); cudaFreeHost(host_txx); cudaFreeHost(host_tyy); cudaFreeHost(host_tzz); cudaFreeHost(host_txy); cudaFreeHost(host_tyz); cudaFreeHost(host_txz); cudaFreeHost(host_rho); cudaFreeHost(host_lambda); cudaFreeHost(host_mu); } }
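A brief note on error handling for the streamed pipeline above: the original code checks completion only with a single cudaDeviceSynchronize() per group, and the per-stream error check is left commented out. The helper below is a minimal, self-contained sketch (it is not part of the original file, and the name check_streams is illustrative) of how launch and per-stream failures could be reported individually using only standard CUDA runtime calls:

#include <stdio.h>
#include <cuda_runtime.h>

/* Returns 0 when all work queued on every stream finished cleanly, -1 otherwise. */
static int check_streams(cudaStream_t *streams, int num_streams)
{
    /* Launch-configuration errors are reported immediately after the launch loop. */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("kernel launch error: %s\n", cudaGetErrorString(err));
        return -1;
    }
    for (int sn = 0; sn < num_streams; sn++) {
        /* Blocks until the async copies and kernels queued on this stream finish. */
        err = cudaStreamSynchronize(streams[sn]);
        if (err != cudaSuccess) {
            printf("stream %d failed: %s\n", sn, cudaGetErrorString(err));
            return -1;
        }
    }
    return 0;
}

Used in place of the bare cudaDeviceSynchronize() after the kernel_v and kernel_tau launch loops, such a helper would attribute a failure to the stream that produced it without changing the copy/compute overlap.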
817dd4fa8bfbafd13191c51cc5b987ee7bfc8323.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*************************************************************************************************** Example contrasting the Stream-K parallel decomposition for GEMM threadblocks versus the "classic data-parallel" and "Split-K" decompositions + residual add. For more details regarding the Stream-K method, see "Stream-K: Work-centric Parallel Decomposition for Dense Matrix-Matrix Multiplication on the GPU" (https://arxiv.org/abs/2301.03598) Requires NVIDIA Ampere or newer device (SM80+). - To lock persistence mode, power (400W), clocks (1005MHz) for evaluation (assumes device 0 and A100) cutlass$ sudo nvidia-smi -pm 1 -i 0 cutlass$ sudo nvidia-smi -i 0 -pl 400 cutlass$ sudo nvidia-smi -i 0 -lgc 1005 - Build and run: cutlass$ mkdir build cutlass$ cd build cutlass/build$ cmake .. 
-DCUTLASS_NVCC_ARCHS=80 cutlass/build$ make 47_ampere_gemm_universal_streamk_broadcast cutlass/build$ ./examples/47_ampere_gemm_universal_streamk/47_ampere_gemm_universal_streamk_broadcast - Reset clocks when done: cutlass$ sudo nvidia-smi -rgc **************************************************************************************************/ #include <iostream> #include <string> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_universal.h" #include "cutlass/gemm/device/gemm_universal_with_broadcast.h" #include "cutlass/gemm/device/gemm_universal_streamk_with_broadcast.h" #include "cutlass/epilogue/thread/linear_combination_residual_block.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/error_metrics.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_foreach.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// GEMM kernel configurations (cutlass_tensorop_h16816gemm_128x128_32x4_nn_align8) ///////////////////////////////////////////////////////////////////////////////////////////////// // A matrix configuration using ElementA = cutlass::half_t; // Element type for A matrix operand using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; // Memory access granularity/alignment of A matrix in units of elements (up to 16 bytes) // B matrix configuration using ElementB = cutlass::half_t; // Element type for B matrix operand using LayoutB = cutlass::layout::RowMajor; // Layout type for B matrix operand constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; // Memory access granularity/alignment of B matrix in units of elements (up to 16 bytes) // C1/C2/D matrix configuration using ElementC = cutlass::half_t; // Element type for C matrix operands using LayoutC = cutlass::layout::RowMajor; // Layout type for C matrix operands constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value; // Memory access granularity/alignment of C matrices in units of elements (up to 16 bytes) // Output matrix configuration using ElementOutput = cutlass::half_t; // Element type for output matrix operands using LayoutOutput = cutlass::layout::RowMajor; // Layout type for output matrix operands // constexpr int AlignmentOutput = 128 / cutlass::sizeof_bits<ElementOutput>::value; // Memory access granularity/alignment of output matrices in units of elements (up to 16 bytes) // Multiply-accumulate blocking/pipelining details using ElementAccumulator = cutlass::half_t; // Element type for internal accumulation using ElementCompute = cutlass::half_t; // Element type for compute using ArchTag = cutlass::arch::Sm80; // Tag indicating the minimum SM that supports the intended feature using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock-level tile size (concept: GemmShape) using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp-level tile size (concept: GemmShape) using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // Instruction-level tile size (concept: GemmShape) constexpr int NumStages = 
4; // Number of global->shared pipeline stages used in the GEMM mainloop // Residual block configuration // Epilogue output operator /// Using LinearCombinationResidualBlock /// Models a residual block of the form: UnaryOp(BinaryOp(BinaryOp(ActivationOp(TensorOp(X) + bias), residual1), residual2)) using EpilogueOp = cutlass::epilogue::thread::LinearCombinationResidualBlock< ElementOutput, // Element type for output matrix ElementAccumulator, // Element type from internal accumulation ElementCompute, // Element type from internal accumulation ElementC, // Element type for C1/C2/D matrix operands AlignmentC, // Memory access granularity of C and D matrix in units of elements cutlass::epilogue::thread::Identity, // Activation cutlass::plus, // Binary operation 1 cutlass::epilogue::thread::Identity, // Unary operation cutlass::plus // Binary operation 2 >; // Reference device GEMM implementation type using DeviceGemmReference = cutlass::reference::device::Gemm< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, ElementAccumulator>; // Classic data-parallel device GEMM implementation type using DeviceGemmBasic = cutlass::gemm::device::GemmUniversalWithBroadcast< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, NumStages, AlignmentA, AlignmentB>; // StreamK device GEMM implementation type using DeviceGemmStreamK = cutlass::gemm::device::GemmUniversalStreamkWithBroadcast< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::gemm::threadblock::ThreadblockSwizzleStreamK, NumStages, AlignmentA, AlignmentB>; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Testbed utility types ///////////////////////////////////////////////////////////////////////////////////////////////// /// Result structure struct Result { double avg_runtime_ms; double gflops; cutlass::Status status; hipError_t error; bool passed; Result( double avg_runtime_ms = 0, double gflops = 0, cutlass::Status status = cutlass::Status::kSuccess, hipError_t error = hipSuccess) : avg_runtime_ms(avg_runtime_ms), gflops(gflops), status(status), error(error), passed(true) {} }; /// Command line options parsing struct Options { std::string command_name; bool help; cutlass::gemm::GemmCoord problem_size; float alpha; float beta; int split_k_factor; int avail_sms; int iterations; bool real; cutlass::HostTensor<ElementA, LayoutA> tensor_a; cutlass::HostTensor<ElementB, LayoutB> tensor_b; cutlass::HostTensor<ElementC, LayoutC> tensor_c1; cutlass::HostTensor<ElementC, LayoutC> tensor_c2; cutlass::HostTensor<ElementC, LayoutC> tensor_d; cutlass::HostTensor<ElementC, LayoutC> tensor_ref_d; cutlass::HostTensor<ElementC, LayoutC> tensor_Vector; // cutlass::HostTensor<ElementC, LayoutC> tensor_Tensor; Options(std::string command_name) : command_name(command_name), help(false), problem_size({2048, 2048, 2048}), alpha(1.0f), beta(1.0f), split_k_factor(1), avail_sms(-1), // Number of device SMs to use is unlimited real(false), iterations(10000) {} bool valid() const { return true; } void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", 
problem_size.n()); cmd.get_cmd_line_argument("k", problem_size.k()); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("split", split_k_factor); cmd.get_cmd_line_argument("iterations", iterations); real = cmd.check_cmd_line_flag("real"); } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "Performs a GEMM computation.\n" << "\n" << "Options:\n" << "\n" << " --help If specified, displays this usage statement.\n\n" << " --m=<int> GEMM M dimension\n" << " --n=<int> GEMM N dimension\n" << " --k=<int> GEMM K dimension\n" << " --alpha=<f32> Epilogue scalar alpha\n" << " --beta=<f32> Epilogue scalar beta\n\n" << " --split=<int> Split-K factor to emulate\n\n" << " --real If specified, initializes with real values instead of whole numbers. Errors are to be expected.\n\n" << " --iterations=<int> Number of profiling iterations to perform.\n\n"; out << "\n\nExamples:\n\n" << "$ " << command_name << " --m=1024 --n=512 --k=1024 --alpha=2 --beta=0.707 \n\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Two flops per multiply-add return 2.0 * double(problem_size.product()) / double(1.0e9) / runtime_s; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// GEMM evaluation ///////////////////////////////////////////////////////////////////////////////////////////////// /// Populates a DeviceGemmBasic::Arguments structure from the given commandline options typename DeviceGemmBasic::Arguments args_from_options( const DeviceGemmBasic &device_gemm, const Options &options, cutlass::HostTensor<ElementA, LayoutA> &tensor_a, cutlass::HostTensor<ElementB, LayoutB> &tensor_b, cutlass::HostTensor<ElementC, LayoutC> &tensor_c1, cutlass::HostTensor<ElementC, LayoutC> &tensor_c2, cutlass::HostTensor<ElementC, LayoutC> &tensor_d, cutlass::HostTensor<ElementC, LayoutC> &tensor_Vector /*, cutlass::HostTensor<ElementC, LayoutC> &tensor_Tensor */ ) { return typename DeviceGemmBasic::Arguments( cutlass::gemm::GemmUniversalMode::kGemm, // universal mode options.problem_size, // problem_size options.split_k_factor, // batch count / splitk slices { // epilogue parameters ElementAccumulator(options.alpha), ElementAccumulator(options.beta) }, tensor_a.device_data(), // ptr_A tensor_b.device_data(), // ptr_B tensor_c1.device_data(), // ptr_C1 tensor_c2.device_data(), // ptr_C2 tensor_d.device_data(), // ptr_D tensor_Vector.device_data(), // ptr_Vector /* tensor_Tensor.device_data(), */nullptr,// ptr_Tensor options.problem_size.mk().product(), // batch_stride_A options.problem_size.nk().product(), // batch_stride_B options.problem_size.mn().product(), // batch_stride_C1 options.problem_size.mn().product(), // batch_stride_C2 options.problem_size.mn().product(), // batch_stride_D options.problem_size.mn().product(), // batch_stride_Vector options.problem_size.mn().product(), // batch_stride_Tensor tensor_a.layout().stride(0), // stride_a tensor_b.layout().stride(0), // stride_b tensor_c1.layout().stride(0), // stride_c1 tensor_c2.layout().stride(0), // stride_c2 tensor_d.layout().stride(0), // stride_d /*tensor_Vector.layout().stride(0)*/0, // stride_Vector /*tensor_Tensor.layout().stride(0)*/0); // stride_Tensor } /// Populates a DeviceGemmStreamK::Arguments structure from the given commandline options typename DeviceGemmStreamK::Arguments args_from_options( const DeviceGemmStreamK &device_gemm, const Options &options, 
cutlass::HostTensor<ElementA, LayoutA> &tensor_a, cutlass::HostTensor<ElementB, LayoutB> &tensor_b, cutlass::HostTensor<ElementC, LayoutC> &tensor_c1, cutlass::HostTensor<ElementC, LayoutC> &tensor_c2, cutlass::HostTensor<ElementC, LayoutC> &tensor_d, cutlass::HostTensor<ElementC, LayoutC> &tensor_Vector/*, cutlass::HostTensor<ElementC, LayoutC> &tensor_Tensor*/ ) { return typename DeviceGemmStreamK::Arguments( cutlass::gemm::GemmUniversalMode::kGemm, // universal mode options.problem_size, // problem_size options.split_k_factor, // batch count / splitk slices { // epilogue parameters ElementAccumulator(options.alpha), ElementAccumulator(options.beta) }, tensor_a.device_data(), // ptr_A tensor_b.device_data(), // ptr_B tensor_c1.device_data(), // ptr_C1 tensor_c2.device_data(), // ptr_C2 tensor_d.device_data(), // ptr_D tensor_Vector.device_data(), // ptr_Vector /* tensor_Tensor.device_data(), */nullptr,// ptr_Tensor // We're not storing Tensor options.problem_size.mk().product(), // batch_stride_A options.problem_size.nk().product(), // batch_stride_B options.problem_size.mn().product(), // batch_stride_C1 options.problem_size.mn().product(), // batch_stride_C2 options.problem_size.mn().product(), // batch_stride_D options.problem_size.mn().product(), // batch_stride_Vector options.problem_size.mn().product(), // batch_stride_Tensor tensor_a.layout().stride(0), // stride_a tensor_b.layout().stride(0), // stride_b tensor_c1.layout().stride(0), // stride_c1 tensor_c2.layout().stride(0), // stride_c2 tensor_d.layout().stride(0), // stride_d /*tensor_Vector.layout().stride(0)*/0, // stride_Vector // Vector stride is always 0 /*tensor_Tensor.layout().stride(0)*/0, // stride_Tensor // We're not storing Tensor options.avail_sms); // avail_sms } /// Execute a given example GEMM computation template <typename DeviceGemmT> Result run(std::string description, Options &options) { // Display test description std::cout << std::endl << description << std::endl; // Zero-initialize test output matrix D cutlass::reference::host::TensorFill(options.tensor_d.host_view()); options.tensor_d.sync_device(); // Instantiate CUTLASS kernel depending on templates DeviceGemmT device_gemm; // Create a structure of gemm kernel arguments suitable for invoking an instance of DeviceGemmT auto arguments = args_from_options(device_gemm, options, options.tensor_a, options.tensor_b, options.tensor_c1, options.tensor_c2, options.tensor_d, options.tensor_Vector/*, options.tensor_Tensor*/); // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = DeviceGemmT::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Check the problem size is supported or not CUTLASS_CHECK(device_gemm.can_implement(arguments)); // Initialize CUTLASS kernel with arguments and workspace pointer CUTLASS_CHECK(device_gemm.initialize(arguments, workspace.get())); // Correctness / Warmup iteration CUTLASS_CHECK(device_gemm()); // Copy output data from CUTLASS and reference kernel to host for comparison options.tensor_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not Result result; result.passed = cutlass::reference::host::TensorEquals( options.tensor_d.host_view(), options.tensor_ref_d.host_view()); double err = cutlass::reference::host::TensorRelativeErrorMetric( options.tensor_d.host_view(), options.tensor_ref_d.host_view()); std::cout << " Disposition: " << 
(result.passed ? "Passed" : "Failed") << " \t Relative error: " << err << std::endl; // Run profiling loop if (options.iterations > 0) { GpuTimer timer; timer.start(); for (int iter = 0; iter < options.iterations; ++iter) { CUTLASS_CHECK(device_gemm()); } timer.stop(); // Compute average runtime and GFLOPs. float elapsed_ms = timer.elapsed_millis(); result.avg_runtime_ms = double(elapsed_ms) / double(options.iterations); result.gflops = options.gflops(result.avg_runtime_ms / 1000.0); std::cout << " Avg runtime: " << result.avg_runtime_ms << " ms" << std::endl; std::cout << " GFLOPs: " << result.gflops << std::endl; } // TODO: uncomment when results match //if (!result.passed) { // exit(-1); //} return result; } /// Program entrypoint int main(int argc, const char **argv) { // CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } // Current device must must have compute capability at least 80 hipDeviceProp_t props; int current_device_id; CUDA_CHECK(hipGetDevice(&current_device_id)); CUDA_CHECK(hipGetDeviceProperties(&props, current_device_id)); if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } // Parse commandline options Options options("ampere_streamk_broadcast_gemm"); options.parse(argc, argv); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } std::cout << options.iterations << " timing iterations of " << options.problem_size.m() << " x " << options.problem_size.n() << " x " << options.problem_size.k() << " matrix-matrix multiply" << std::endl; if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } // // Initialize GEMM datasets // // Initialize tensors using CUTLASS helper functions options.tensor_a.resize(options.problem_size.mk()); // <- Create matrix A with dimensions M x K options.tensor_b.resize(options.problem_size.kn()); // <- Create matrix B with dimensions K x N options.tensor_c1.resize(options.problem_size.mn()); // <- Create matrix C1 with dimensions M x N options.tensor_c2.resize(options.problem_size.mn()); // <- Create matrix C2 with dimensions M x N options.tensor_d.resize(options.problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from CUTLASS kernel options.tensor_ref_d.resize(options.problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from reference kernel options.tensor_Vector.resize({1, options.problem_size.n()}); // <- Create broadcast vector with dimensions N x 1 // options.tensor_Tensor.resize(options.problem_size.mn()); // <- Create T matrix with dimensions M x N int _init_bits = options.real ? 
-1 : 0; // Fill matrix A on host with uniform-random data [-2, 2] cutlass::reference::host::TensorFillRandomUniform( options.tensor_a.host_view(), 1, ElementA(2), ElementA(-2), _init_bits); // Fill matrix B on host with uniform-random data [-2, 2] cutlass::reference::host::TensorFillRandomUniform( options.tensor_b.host_view(), 1, ElementB(2), ElementB(-2), _init_bits); // Fill matrix C1 on host with uniform-random data [-2, 2] cutlass::reference::host::TensorFillRandomUniform( options.tensor_c1.host_view(), 1, ElementC(2), ElementC(-2), _init_bits); // Fill matrix C2 on host with uniform-random data [-2, 2] cutlass::reference::host::TensorFillRandomUniform( options.tensor_c2.host_view(), 1, ElementC(2), ElementC(-2), _init_bits); cutlass::reference::host::TensorFillRandomUniform( options.tensor_Vector.host_view(), 1, ElementC(2), ElementC(-2), _init_bits); // // Compute reference output // // Copy data from host to GPU options.tensor_a.sync_device(); options.tensor_b.sync_device(); options.tensor_c1.sync_device(); options.tensor_c2.sync_device(); options.tensor_Vector.sync_device(); // options.tensor_Tensor.sync_device(); // Zero-initialize reference output matrix D cutlass::reference::host::TensorFill(options.tensor_ref_d.host_view()); options.tensor_ref_d.sync_device(); // Create instantiation for device reference gemm kernel DeviceGemmReference gemm_reference; // Launch device reference gemm kernel gemm_reference( options.problem_size, ElementAccumulator(options.alpha), options.tensor_a.device_ref(), options.tensor_b.device_ref(), ElementAccumulator(options.beta), options.tensor_c1.device_ref(), options.tensor_ref_d.device_ref()); // Wait for kernels to finish CUDA_CHECK(hipDeviceSynchronize()); // Copy output data from reference kernel to host for comparison options.tensor_ref_d.sync_host(); // Add broadcast vector (without multiplier) // This is only possible because BinaryOp is addition, and UnaryOps are identity. // This makes the addition of broadcast vector commutable. 
/// identity(plus(identity(alpha * (a * b) + v), beta * c)) == /// alpha * a * b + v + beta * c == /// (alpha * a * b + beta * c) + v == /// GEMM(a, b, c) + v // Vector broadcast on host for (int i=0; i < options.problem_size.m(); ++i) { for (int j=0; j < options.problem_size.n(); ++j) { options.tensor_ref_d.host_view().ref().at({i, j}) += options.tensor_Vector.host_view().ref().at({0, j}); options.tensor_ref_d.host_view().ref().at({i, j}) += options.tensor_c2.host_view().ref().at({i, j}); } } // Sync back with device just in case options.tensor_ref_d.sync_device(); // // Evaluate CUTLASS kernels // // Test default operation if (options.split_k_factor == 1) { // Compare basic data-parallel version versus StreamK version using default load-balancing heuristics Result basic_dp = run<DeviceGemmBasic>("Basic data-parallel GEMM", options); Result streamk_default = run<DeviceGemmStreamK>("StreamK GEMM with default load-balancing", options); printf(" Speedup vs Basic-DP: %.3f\n", (basic_dp.avg_runtime_ms / streamk_default.avg_runtime_ms)); // Show that StreamK can emulate basic data-parallel GEMM when we set the number of SMs to load-balance across = 1 options.avail_sms = 1; // Set loadbalancing width to 1 SM (no load balancing) Result streamk_dp = run<DeviceGemmStreamK>("StreamK emulating basic data-parallel GEMM", options); options.avail_sms = -1; // Reset loadbalancing width to unspecified SMs (i.e., the number of device SMs) printf(" Speedup vs Basic-DP: %.3f\n", (basic_dp.avg_runtime_ms / streamk_dp.avg_runtime_ms)); options.split_k_factor++; // Increment splitting factor for next evaluation } // Show that StreamK can emulate "Split-K" with a tile-splitting factor Result basic_splitk = run<DeviceGemmBasic>( std::string("Basic split-K GEMM with tile-splitting factor ") + std::to_string(options.split_k_factor), options); Result streamk_splitk = run<DeviceGemmStreamK>( std::string("StreamK emulating Split-K GEMM with tile-splitting factor ") + std::to_string(options.split_k_factor), options); printf(" Speedup vs Basic-SplitK: %.3f\n", (basic_splitk.avg_runtime_ms / streamk_splitk.avg_runtime_ms)); return 0; }
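With the Identity activation/unary operators and plus binary operators chosen for LinearCombinationResidualBlock in this example, the epilogue applied to each output element reduces to a plain sum, which is what the host-side reference loop above reconstructs. The scalar sketch below spells that out; it is illustrative only (not CUTLASS API code, and the name epilogue_ref is made up here):

#include <cstdio>

// Scalar model of the per-element reference used by this example:
//   alpha*AB + beta*C1   (device reference GEMM)
// + V                    (broadcast vector, added on the host)
// + C2                   (second residual tensor, added on the host)
// Because the binary ops are plus and the activation/unary ops are identity,
// the order of these additions does not matter (see the comment block above).
static float epilogue_ref(float ab, float c1, float c2, float v,
                          float alpha, float beta)
{
  float accum = alpha * ab + beta * c1;  // scaled accumulator combined with source C1
  accum += v;                            // broadcast vector
  accum += c2;                           // second residual
  return accum;
}

int main()
{
  // With the example defaults alpha = beta = 1, this is GEMM(a, b, c1) + v + c2.
  std::printf("%f\n", epilogue_ref(4.0f, 1.0f, 0.5f, 0.25f, 1.0f, 1.0f));
  return 0;
}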
817dd4fa8bfbafd13191c51cc5b987ee7bfc8323.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*************************************************************************************************** Example contrasting the Stream-K parallel decomposition for GEMM threadblocks versus the "classic data-parallel" and "Split-K" decompositions + residual add. For more details regarding the Stream-K method, see "Stream-K: Work-centric Parallel Decomposition for Dense Matrix-Matrix Multiplication on the GPU" (https://arxiv.org/abs/2301.03598) Requires NVIDIA Ampere or newer device (SM80+). - To lock persistence mode, power (400W), clocks (1005MHz) for evaluation (assumes device 0 and A100) cutlass$ sudo nvidia-smi -pm 1 -i 0 cutlass$ sudo nvidia-smi -i 0 -pl 400 cutlass$ sudo nvidia-smi -i 0 -lgc 1005 - Build and run: cutlass$ mkdir build cutlass$ cd build cutlass/build$ cmake .. 
-DCUTLASS_NVCC_ARCHS=80 cutlass/build$ make 47_ampere_gemm_universal_streamk_broadcast cutlass/build$ ./examples/47_ampere_gemm_universal_streamk/47_ampere_gemm_universal_streamk_broadcast - Reset clocks when done: cutlass$ sudo nvidia-smi -rgc **************************************************************************************************/ #include <iostream> #include <string> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_universal.h" #include "cutlass/gemm/device/gemm_universal_with_broadcast.h" #include "cutlass/gemm/device/gemm_universal_streamk_with_broadcast.h" #include "cutlass/epilogue/thread/linear_combination_residual_block.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/error_metrics.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_foreach.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// GEMM kernel configurations (cutlass_tensorop_h16816gemm_128x128_32x4_nn_align8) ///////////////////////////////////////////////////////////////////////////////////////////////// // A matrix configuration using ElementA = cutlass::half_t; // Element type for A matrix operand using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; // Memory access granularity/alignment of A matrix in units of elements (up to 16 bytes) // B matrix configuration using ElementB = cutlass::half_t; // Element type for B matrix operand using LayoutB = cutlass::layout::RowMajor; // Layout type for B matrix operand constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; // Memory access granularity/alignment of B matrix in units of elements (up to 16 bytes) // C1/C2/D matrix configuration using ElementC = cutlass::half_t; // Element type for C matrix operands using LayoutC = cutlass::layout::RowMajor; // Layout type for C matrix operands constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value; // Memory access granularity/alignment of C matrices in units of elements (up to 16 bytes) // Output matrix configuration using ElementOutput = cutlass::half_t; // Element type for output matrix operands using LayoutOutput = cutlass::layout::RowMajor; // Layout type for output matrix operands // constexpr int AlignmentOutput = 128 / cutlass::sizeof_bits<ElementOutput>::value; // Memory access granularity/alignment of output matrices in units of elements (up to 16 bytes) // Multiply-accumulate blocking/pipelining details using ElementAccumulator = cutlass::half_t; // Element type for internal accumulation using ElementCompute = cutlass::half_t; // Element type for compute using ArchTag = cutlass::arch::Sm80; // Tag indicating the minimum SM that supports the intended feature using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock-level tile size (concept: GemmShape) using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp-level tile size (concept: GemmShape) using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // Instruction-level tile size (concept: GemmShape) constexpr int NumStages = 
4; // Number of global->shared pipeline stages used in the GEMM mainloop // Residual block configuration // Epilogue output operator /// Using LinearCombinationResidualBlock /// Models a residual block of the form: UnaryOp(BinaryOp(BinaryOp(ActivationOp(TensorOp(X) + bias), residual1), residual2)) using EpilogueOp = cutlass::epilogue::thread::LinearCombinationResidualBlock< ElementOutput, // Element type for output matrix ElementAccumulator, // Element type from internal accumulation ElementCompute, // Element type from internal accumulation ElementC, // Element type for C1/C2/D matrix operands AlignmentC, // Memory access granularity of C and D matrix in units of elements cutlass::epilogue::thread::Identity, // Activation cutlass::plus, // Binary operation 1 cutlass::epilogue::thread::Identity, // Unary operation cutlass::plus // Binary operation 2 >; // Reference device GEMM implementation type using DeviceGemmReference = cutlass::reference::device::Gemm< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, ElementAccumulator>; // Classic data-parallel device GEMM implementation type using DeviceGemmBasic = cutlass::gemm::device::GemmUniversalWithBroadcast< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, NumStages, AlignmentA, AlignmentB>; // StreamK device GEMM implementation type using DeviceGemmStreamK = cutlass::gemm::device::GemmUniversalStreamkWithBroadcast< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::gemm::threadblock::ThreadblockSwizzleStreamK, NumStages, AlignmentA, AlignmentB>; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Testbed utility types ///////////////////////////////////////////////////////////////////////////////////////////////// /// Result structure struct Result { double avg_runtime_ms; double gflops; cutlass::Status status; cudaError_t error; bool passed; Result( double avg_runtime_ms = 0, double gflops = 0, cutlass::Status status = cutlass::Status::kSuccess, cudaError_t error = cudaSuccess) : avg_runtime_ms(avg_runtime_ms), gflops(gflops), status(status), error(error), passed(true) {} }; /// Command line options parsing struct Options { std::string command_name; bool help; cutlass::gemm::GemmCoord problem_size; float alpha; float beta; int split_k_factor; int avail_sms; int iterations; bool real; cutlass::HostTensor<ElementA, LayoutA> tensor_a; cutlass::HostTensor<ElementB, LayoutB> tensor_b; cutlass::HostTensor<ElementC, LayoutC> tensor_c1; cutlass::HostTensor<ElementC, LayoutC> tensor_c2; cutlass::HostTensor<ElementC, LayoutC> tensor_d; cutlass::HostTensor<ElementC, LayoutC> tensor_ref_d; cutlass::HostTensor<ElementC, LayoutC> tensor_Vector; // cutlass::HostTensor<ElementC, LayoutC> tensor_Tensor; Options(std::string command_name) : command_name(command_name), help(false), problem_size({2048, 2048, 2048}), alpha(1.0f), beta(1.0f), split_k_factor(1), avail_sms(-1), // Number of device SMs to use is unlimited real(false), iterations(10000) {} bool valid() const { return true; } void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", 
problem_size.n()); cmd.get_cmd_line_argument("k", problem_size.k()); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("split", split_k_factor); cmd.get_cmd_line_argument("iterations", iterations); real = cmd.check_cmd_line_flag("real"); } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "Performs a GEMM computation.\n" << "\n" << "Options:\n" << "\n" << " --help If specified, displays this usage statement.\n\n" << " --m=<int> GEMM M dimension\n" << " --n=<int> GEMM N dimension\n" << " --k=<int> GEMM K dimension\n" << " --alpha=<f32> Epilogue scalar alpha\n" << " --beta=<f32> Epilogue scalar beta\n\n" << " --split=<int> Split-K factor to emulate\n\n" << " --real If specified, initializes with real values instead of whole numbers. Errors are to be expected.\n\n" << " --iterations=<int> Number of profiling iterations to perform.\n\n"; out << "\n\nExamples:\n\n" << "$ " << command_name << " --m=1024 --n=512 --k=1024 --alpha=2 --beta=0.707 \n\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Two flops per multiply-add return 2.0 * double(problem_size.product()) / double(1.0e9) / runtime_s; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// GEMM evaluation ///////////////////////////////////////////////////////////////////////////////////////////////// /// Populates a DeviceGemmBasic::Arguments structure from the given commandline options typename DeviceGemmBasic::Arguments args_from_options( const DeviceGemmBasic &device_gemm, const Options &options, cutlass::HostTensor<ElementA, LayoutA> &tensor_a, cutlass::HostTensor<ElementB, LayoutB> &tensor_b, cutlass::HostTensor<ElementC, LayoutC> &tensor_c1, cutlass::HostTensor<ElementC, LayoutC> &tensor_c2, cutlass::HostTensor<ElementC, LayoutC> &tensor_d, cutlass::HostTensor<ElementC, LayoutC> &tensor_Vector /*, cutlass::HostTensor<ElementC, LayoutC> &tensor_Tensor */ ) { return typename DeviceGemmBasic::Arguments( cutlass::gemm::GemmUniversalMode::kGemm, // universal mode options.problem_size, // problem_size options.split_k_factor, // batch count / splitk slices { // epilogue parameters ElementAccumulator(options.alpha), ElementAccumulator(options.beta) }, tensor_a.device_data(), // ptr_A tensor_b.device_data(), // ptr_B tensor_c1.device_data(), // ptr_C1 tensor_c2.device_data(), // ptr_C2 tensor_d.device_data(), // ptr_D tensor_Vector.device_data(), // ptr_Vector /* tensor_Tensor.device_data(), */nullptr,// ptr_Tensor options.problem_size.mk().product(), // batch_stride_A options.problem_size.nk().product(), // batch_stride_B options.problem_size.mn().product(), // batch_stride_C1 options.problem_size.mn().product(), // batch_stride_C2 options.problem_size.mn().product(), // batch_stride_D options.problem_size.mn().product(), // batch_stride_Vector options.problem_size.mn().product(), // batch_stride_Tensor tensor_a.layout().stride(0), // stride_a tensor_b.layout().stride(0), // stride_b tensor_c1.layout().stride(0), // stride_c1 tensor_c2.layout().stride(0), // stride_c2 tensor_d.layout().stride(0), // stride_d /*tensor_Vector.layout().stride(0)*/0, // stride_Vector /*tensor_Tensor.layout().stride(0)*/0); // stride_Tensor } /// Populates a DeviceGemmStreamK::Arguments structure from the given commandline options typename DeviceGemmStreamK::Arguments args_from_options( const DeviceGemmStreamK &device_gemm, const Options &options, 
cutlass::HostTensor<ElementA, LayoutA> &tensor_a, cutlass::HostTensor<ElementB, LayoutB> &tensor_b, cutlass::HostTensor<ElementC, LayoutC> &tensor_c1, cutlass::HostTensor<ElementC, LayoutC> &tensor_c2, cutlass::HostTensor<ElementC, LayoutC> &tensor_d, cutlass::HostTensor<ElementC, LayoutC> &tensor_Vector/*, cutlass::HostTensor<ElementC, LayoutC> &tensor_Tensor*/ ) { return typename DeviceGemmStreamK::Arguments( cutlass::gemm::GemmUniversalMode::kGemm, // universal mode options.problem_size, // problem_size options.split_k_factor, // batch count / splitk slices { // epilogue parameters ElementAccumulator(options.alpha), ElementAccumulator(options.beta) }, tensor_a.device_data(), // ptr_A tensor_b.device_data(), // ptr_B tensor_c1.device_data(), // ptr_C1 tensor_c2.device_data(), // ptr_C2 tensor_d.device_data(), // ptr_D tensor_Vector.device_data(), // ptr_Vector /* tensor_Tensor.device_data(), */nullptr,// ptr_Tensor // We're not storing Tensor options.problem_size.mk().product(), // batch_stride_A options.problem_size.nk().product(), // batch_stride_B options.problem_size.mn().product(), // batch_stride_C1 options.problem_size.mn().product(), // batch_stride_C2 options.problem_size.mn().product(), // batch_stride_D options.problem_size.mn().product(), // batch_stride_Vector options.problem_size.mn().product(), // batch_stride_Tensor tensor_a.layout().stride(0), // stride_a tensor_b.layout().stride(0), // stride_b tensor_c1.layout().stride(0), // stride_c1 tensor_c2.layout().stride(0), // stride_c2 tensor_d.layout().stride(0), // stride_d /*tensor_Vector.layout().stride(0)*/0, // stride_Vector // Vector stride is always 0 /*tensor_Tensor.layout().stride(0)*/0, // stride_Tensor // We're not storing Tensor options.avail_sms); // avail_sms } /// Execute a given example GEMM computation template <typename DeviceGemmT> Result run(std::string description, Options &options) { // Display test description std::cout << std::endl << description << std::endl; // Zero-initialize test output matrix D cutlass::reference::host::TensorFill(options.tensor_d.host_view()); options.tensor_d.sync_device(); // Instantiate CUTLASS kernel depending on templates DeviceGemmT device_gemm; // Create a structure of gemm kernel arguments suitable for invoking an instance of DeviceGemmT auto arguments = args_from_options(device_gemm, options, options.tensor_a, options.tensor_b, options.tensor_c1, options.tensor_c2, options.tensor_d, options.tensor_Vector/*, options.tensor_Tensor*/); // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = DeviceGemmT::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Check the problem size is supported or not CUTLASS_CHECK(device_gemm.can_implement(arguments)); // Initialize CUTLASS kernel with arguments and workspace pointer CUTLASS_CHECK(device_gemm.initialize(arguments, workspace.get())); // Correctness / Warmup iteration CUTLASS_CHECK(device_gemm()); // Copy output data from CUTLASS and reference kernel to host for comparison options.tensor_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not Result result; result.passed = cutlass::reference::host::TensorEquals( options.tensor_d.host_view(), options.tensor_ref_d.host_view()); double err = cutlass::reference::host::TensorRelativeErrorMetric( options.tensor_d.host_view(), options.tensor_ref_d.host_view()); std::cout << " Disposition: " << 
(result.passed ? "Passed" : "Failed") << " \t Relative error: " << err << std::endl; // Run profiling loop if (options.iterations > 0) { GpuTimer timer; timer.start(); for (int iter = 0; iter < options.iterations; ++iter) { CUTLASS_CHECK(device_gemm()); } timer.stop(); // Compute average runtime and GFLOPs. float elapsed_ms = timer.elapsed_millis(); result.avg_runtime_ms = double(elapsed_ms) / double(options.iterations); result.gflops = options.gflops(result.avg_runtime_ms / 1000.0); std::cout << " Avg runtime: " << result.avg_runtime_ms << " ms" << std::endl; std::cout << " GFLOPs: " << result.gflops << std::endl; } // TODO: uncomment when results match //if (!result.passed) { // exit(-1); //} return result; } /// Program entrypoint int main(int argc, const char **argv) { // CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } // Current device must must have compute capability at least 80 cudaDeviceProp props; int current_device_id; CUDA_CHECK(cudaGetDevice(&current_device_id)); CUDA_CHECK(cudaGetDeviceProperties(&props, current_device_id)); if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } // Parse commandline options Options options("ampere_streamk_broadcast_gemm"); options.parse(argc, argv); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } std::cout << options.iterations << " timing iterations of " << options.problem_size.m() << " x " << options.problem_size.n() << " x " << options.problem_size.k() << " matrix-matrix multiply" << std::endl; if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } // // Initialize GEMM datasets // // Initialize tensors using CUTLASS helper functions options.tensor_a.resize(options.problem_size.mk()); // <- Create matrix A with dimensions M x K options.tensor_b.resize(options.problem_size.kn()); // <- Create matrix B with dimensions K x N options.tensor_c1.resize(options.problem_size.mn()); // <- Create matrix C1 with dimensions M x N options.tensor_c2.resize(options.problem_size.mn()); // <- Create matrix C2 with dimensions M x N options.tensor_d.resize(options.problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from CUTLASS kernel options.tensor_ref_d.resize(options.problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from reference kernel options.tensor_Vector.resize({1, options.problem_size.n()}); // <- Create broadcast vector with dimensions N x 1 // options.tensor_Tensor.resize(options.problem_size.mn()); // <- Create T matrix with dimensions M x N int _init_bits = options.real ? 
-1 : 0; // Fill matrix A on host with uniform-random data [-2, 2] cutlass::reference::host::TensorFillRandomUniform( options.tensor_a.host_view(), 1, ElementA(2), ElementA(-2), _init_bits); // Fill matrix B on host with uniform-random data [-2, 2] cutlass::reference::host::TensorFillRandomUniform( options.tensor_b.host_view(), 1, ElementB(2), ElementB(-2), _init_bits); // Fill matrix C1 on host with uniform-random data [-2, 2] cutlass::reference::host::TensorFillRandomUniform( options.tensor_c1.host_view(), 1, ElementC(2), ElementC(-2), _init_bits); // Fill matrix C2 on host with uniform-random data [-2, 2] cutlass::reference::host::TensorFillRandomUniform( options.tensor_c2.host_view(), 1, ElementC(2), ElementC(-2), _init_bits); cutlass::reference::host::TensorFillRandomUniform( options.tensor_Vector.host_view(), 1, ElementC(2), ElementC(-2), _init_bits); // // Compute reference output // // Copy data from host to GPU options.tensor_a.sync_device(); options.tensor_b.sync_device(); options.tensor_c1.sync_device(); options.tensor_c2.sync_device(); options.tensor_Vector.sync_device(); // options.tensor_Tensor.sync_device(); // Zero-initialize reference output matrix D cutlass::reference::host::TensorFill(options.tensor_ref_d.host_view()); options.tensor_ref_d.sync_device(); // Create instantiation for device reference gemm kernel DeviceGemmReference gemm_reference; // Launch device reference gemm kernel gemm_reference( options.problem_size, ElementAccumulator(options.alpha), options.tensor_a.device_ref(), options.tensor_b.device_ref(), ElementAccumulator(options.beta), options.tensor_c1.device_ref(), options.tensor_ref_d.device_ref()); // Wait for kernels to finish CUDA_CHECK(cudaDeviceSynchronize()); // Copy output data from reference kernel to host for comparison options.tensor_ref_d.sync_host(); // Add broadcast vector (without multiplier) // This is only possible because BinaryOp is addition, and UnaryOps are identity. // This makes the addition of broadcast vector commutable. 
/// identity(plus(identity(alpha * (a * b) + v), beta * c)) == /// alpha * a * b + v + beta * c == /// (alpha * a * b + beta * c) + v == /// GEMM(a, b, c) + v // Vector broadcast on host for (int i=0; i < options.problem_size.m(); ++i) { for (int j=0; j < options.problem_size.n(); ++j) { options.tensor_ref_d.host_view().ref().at({i, j}) += options.tensor_Vector.host_view().ref().at({0, j}); options.tensor_ref_d.host_view().ref().at({i, j}) += options.tensor_c2.host_view().ref().at({i, j}); } } // Sync back with device just in case options.tensor_ref_d.sync_device(); // // Evaluate CUTLASS kernels // // Test default operation if (options.split_k_factor == 1) { // Compare basic data-parallel version versus StreamK version using default load-balancing heuristics Result basic_dp = run<DeviceGemmBasic>("Basic data-parallel GEMM", options); Result streamk_default = run<DeviceGemmStreamK>("StreamK GEMM with default load-balancing", options); printf(" Speedup vs Basic-DP: %.3f\n", (basic_dp.avg_runtime_ms / streamk_default.avg_runtime_ms)); // Show that StreamK can emulate basic data-parallel GEMM when we set the number of SMs to load-balance across = 1 options.avail_sms = 1; // Set loadbalancing width to 1 SM (no load balancing) Result streamk_dp = run<DeviceGemmStreamK>("StreamK emulating basic data-parallel GEMM", options); options.avail_sms = -1; // Reset loadbalancing width to unspecified SMs (i.e., the number of device SMs) printf(" Speedup vs Basic-DP: %.3f\n", (basic_dp.avg_runtime_ms / streamk_dp.avg_runtime_ms)); options.split_k_factor++; // Increment splitting factor for next evaluation } // Show that StreamK can emulate "Split-K" with a tile-splitting factor Result basic_splitk = run<DeviceGemmBasic>( std::string("Basic split-K GEMM with tile-splitting factor ") + std::to_string(options.split_k_factor), options); Result streamk_splitk = run<DeviceGemmStreamK>( std::string("StreamK emulating Split-K GEMM with tile-splitting factor ") + std::to_string(options.split_k_factor), options); printf(" Speedup vs Basic-SplitK: %.3f\n", (basic_splitk.avg_runtime_ms / streamk_splitk.avg_runtime_ms)); return 0; }
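The host-side reference check in the example above leans on the algebraic identity spelled out in its comments: because both binary ops in the epilogue are plain addition and both unary/activation ops are the identity, the broadcast vector and the second residual can be added after a plain reference GEMM instead of inside the epilogue. Below is a minimal, host-only sketch of that reasoning using scalar doubles in place of CUTLASS tensors; the placement of c2 in the epilogue-order expression is an assumption for illustration, and none of these names belong to the example itself.

#include <cassert>
#include <cmath>

// Scalar stand-ins for one output element:
//   epilogue order:  ((alpha*AB + v) + beta*C1) + C2   (c2 placement assumed)
//   reference order: (alpha*AB + beta*C1), then += v, then += C2
int main() {
    double alpha = 2.0, beta = 0.707;
    double ab = 1.5;   // one accumulator value of A*B
    double c1 = -0.25; // source operand C1
    double c2 = 0.125; // residual operand C2
    double v  = 3.0;   // broadcast vector element for this column

    double epilogue_order  = ((alpha * ab + v) + beta * c1) + c2;

    double reference_order = alpha * ab + beta * c1; // plain GEMM result
    reference_order += v;                            // broadcast vector added afterwards
    reference_order += c2;                           // residual added afterwards

    // Addition commutes, so both orders agree (up to rounding).
    assert(std::fabs(epilogue_order - reference_order) < 1e-12);
    return 0;
}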
d033f8dff77f82bfb7b8cd159e0b95404279a69c.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
        int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t,
        LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 16, 16, false>;

template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias,
        const int8_t* d_z, int8_t* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);

#pragma GCC diagnostic pop
#endif
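The instantiation above selects a BiasAddLinearCombinationHSwishClamp epilogue for the int8 implicit-GEMM convolution. As a rough illustration of what one element of such an epilogue conventionally computes (bias add, linear combination, h-swish activation, saturate to the int8 range), here is a scalar host-side sketch. The exact scaling, rounding, and parameter meaning in MegDNN's epilogue may differ, so the formula and the helper names (hswish, saturate_to_int8, epilogue_element, alpha/beta/gamma) are illustrative assumptions only.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Standard h-swish: x * relu6(x + 3) / 6.
static float hswish(float x) {
    float relu6 = std::min(std::max(x + 3.0f, 0.0f), 6.0f);
    return x * relu6 / 6.0f;
}

// Saturating conversion to the int8 output range.
static int8_t saturate_to_int8(float x) {
    float r = std::nearbyint(x);
    r = std::min(std::max(r, -128.0f), 127.0f);
    return static_cast<int8_t>(r);
}

// One output element: take the int32 accumulator, add bias and the optional
// residual z with float scales, apply h-swish, then clamp to int8.
static int8_t epilogue_element(int32_t acc, int32_t bias, int32_t z,
                               float alpha, float beta, float gamma) {
    float linear = alpha * static_cast<float>(acc) +
                   beta * static_cast<float>(bias) +
                   gamma * static_cast<float>(z);
    return saturate_to_int8(hswish(linear));
}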
d033f8dff77f82bfb7b8cd159e0b95404279a69c.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
        int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t,
        LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 16, 16, false>;

template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias,
        const int8_t* d_z, int8_t* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);

#pragma GCC diagnostic pop
#endif
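The .hip and .cu files above differ only in the runtime types hipify rewrote (here, hipStream_t versus cudaStream_t, plus the generated header comment). A minimal sketch of how the same declaration can be kept portable by hand is shown below; it is not part of MegDNN, and the wrapper name and parameters are illustrative assumptions.

// Bridge the stream type across both toolchains with a single alias.
#ifdef __HIPCC__
#include <hip/hip_runtime.h>
using GpuStream = hipStream_t;
#else
#include <cuda_runtime.h>
using GpuStream = cudaStream_t;
#endif

#include <cstdint>

// A wrapper declaration that compiles unchanged under nvcc and hipcc.
void launch_conv_bias_int8(const int8_t* src, const int8_t* filter,
                           const int32_t* bias, int8_t* dst,
                           GpuStream stream);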
1321b266327c761d15448257798c5fd63cdb5d2e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gtest/gtest.h" #include "sift.h" #include "cuda_task_executor.h" #include "gpudevice.h" #include <vector> #include <iterator> #include <algorithm> #include <rocblas.h> #include <stdexcept> #include "spdlog/spdlog.h" #include <iostream> #include <vector> #include <string> #include <memory> #include <cstdint> #include <random> #include <thrust/host_vector.h> #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/uniform_int_distribution.h> namespace { class SiftTest : public ::testing::Test { protected: std::shared_ptr<spdlog::logger> logger_; SiftTest() { logger_ = spdlog::get("console"); if (! logger_) { logger_ = spdlog::stdout_logger_mt("console"); } spdlog::set_level(spdlog::level::trace); //spdlog::set_level(spdlog::level::info); } virtual ~SiftTest() { } template <class T> void print_arr(T * arr, int N) { printf("print_arr\n"); for (int i=0; i < N; i++) { logger_->debug("\t[{}]={}", i, arr[i]); /*printf("\t[%d]=%.5f\n", i, arr[i]);*/ } } }; TEST_F(SiftTest, DotProductTest) { int cols = 3; double first[3] = {1,2,3}; double *dfirst; hipMalloc(&dfirst, sizeof(double) * cols); hipMemcpy(dfirst, first, sizeof(double) * cols, hipMemcpyHostToDevice); double second[3] = {5, .5, 2}; double* dsecond; hipMalloc(&dsecond, sizeof(double) * cols); hipMemcpy(dsecond, second, sizeof(double) * cols, hipMemcpyHostToDevice); double out[1] = {0}; double *dout; hipMalloc(&dout, sizeof(double)); hipLaunchKernelGGL(( cudautils::dot_product_wrap), dim3(1),dim3(1), 0, 0, dfirst, dsecond, dout, 1, cols); hipMemcpy(out, dout, sizeof(double), hipMemcpyDeviceToHost); ASSERT_EQ(*out, 12); // 3x3 matrix int rows = 3; double matrix[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9}; double *dmatrix; hipMalloc(&dmatrix, sizeof(double) * 9); hipMemcpy(dmatrix, matrix, sizeof(double) * 9, hipMemcpyHostToDevice); double vec[3] = {3, 1, 2}; double *dvec; hipMalloc(&dvec, sizeof(double) * 3); hipMemcpy(dvec, vec, sizeof(double) * 3, hipMemcpyHostToDevice); double* dout_arr; hipMalloc(&dout_arr, rows * sizeof(double)); hipLaunchKernelGGL(( cudautils::dot_product_wrap), dim3(1),dim3(1), 0, 0, dmatrix, dvec, dout_arr, rows, cols); double* out_arr = (double*) malloc(rows * sizeof(double)); hipMemcpy(out_arr, dout_arr, sizeof(double) * rows, hipMemcpyDeviceToHost); double answer[3] = {11, 29, 47}; for (int i=0; i < rows; i++) ASSERT_EQ(out_arr[i], answer[i]); hipFree(dfirst); hipFree(dsecond); hipFree(dout); hipFree(dmatrix); hipFree(dvec); hipFree(dout_arr); free(out_arr); } TEST_F(SiftTest, GetBinIdxTest) { cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, 0, 0, 0, 0); const int IndexSize = sift_params.IndexSize; double xySpacing = (double) sift_params.xyScale * sift_params.MagFactor; double tSpacing = (double) sift_params.tScale * sift_params.MagFactor; int xyiradius = round(1.414 * xySpacing * (sift_params.IndexSize + 1) / 2.0); int tiradius = round(1.414 * tSpacing * (sift_params.IndexSize + 1) / 2.0); // Surrounding radius of pixels are binned for computation // according to sift_params.IndexSize int *i_bin, *j_bin, *k_bin; int hi, hj, hk; hipMalloc(&i_bin, sizeof(int)); hipMalloc(&j_bin, sizeof(int)); hipMalloc(&k_bin, sizeof(int)); for (int i = -xyiradius; i <= xyiradius; i++) { for (int j = -xyiradius; j <= xyiradius; j++) { for (int k = -tiradius; k <= tiradius; k++) { // Find bin idx hipLaunchKernelGGL(( cudautils::get_bin_idx_wrap), dim3(1),dim3(1), 0, 0, i, xyiradius, IndexSize, 
i_bin); hipLaunchKernelGGL(( cudautils::get_bin_idx_wrap), dim3(1),dim3(1), 0, 0, j, xyiradius, IndexSize, j_bin); hipLaunchKernelGGL(( cudautils::get_bin_idx_wrap), dim3(1),dim3(1), 0, 0, k, tiradius, IndexSize, k_bin); hipMemcpy(&hi, i_bin, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&hj, j_bin, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&hk, k_bin, sizeof(int), hipMemcpyDeviceToHost); ASSERT_GE(hi, 0); ASSERT_LT(hi, sift_params.IndexSize); ASSERT_GE(hj, 0); ASSERT_LT(hj, sift_params.IndexSize); ASSERT_GE(hk, 0); ASSERT_LT(hk, sift_params.IndexSize); } } } hipFree(i_bin); hipFree(j_bin); hipFree(k_bin); free(fv_centers); } TEST_F(SiftTest, BinSub2Ind1Test) { cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, 0, 0, 0, 0); int* bin_index; hipMalloc(&bin_index, sizeof(int)); int temp; for (int i=0; i < sift_params.IndexSize; i++) { for (int j=0; j < sift_params.IndexSize; j++) { for (int k=0; k < sift_params.IndexSize; k++) { for (uint16_t ixi=0; ixi < sift_params.nFaces; ixi++) { hipLaunchKernelGGL(( cudautils::bin_sub2ind_wrap), dim3(1),dim3(1), 0, 0, i, j, k, ixi, sift_params, bin_index); hipMemcpy(&temp, bin_index, sizeof(int), hipMemcpyDeviceToHost); ASSERT_LT(temp, sift_params.descriptor_len); } } } } int max = sift_params.descriptor_len - 1; hipLaunchKernelGGL(( cudautils::bin_sub2ind_wrap), dim3(1),dim3(1), 0, 0, 1,1,1,79, sift_params, bin_index); hipMemcpy(&temp, bin_index, sizeof(int), hipMemcpyDeviceToHost); ASSERT_EQ(temp, max); hipLaunchKernelGGL(( cudautils::bin_sub2ind_wrap), dim3(1),dim3(1), 0, 0, 0,1,1,79, sift_params, bin_index); hipMemcpy(&temp, bin_index, sizeof(int), hipMemcpyDeviceToHost); ASSERT_EQ(temp, max - 1); hipLaunchKernelGGL(( cudautils::bin_sub2ind_wrap), dim3(1),dim3(1), 0, 0, 1,0,1,79, sift_params, bin_index); hipMemcpy(&temp, bin_index, sizeof(int), hipMemcpyDeviceToHost); ASSERT_EQ(temp, max - 2); hipLaunchKernelGGL(( cudautils::bin_sub2ind_wrap), dim3(1),dim3(1), 0, 0, 1,1,0,79, sift_params, bin_index); hipMemcpy(&temp, bin_index, sizeof(int), hipMemcpyDeviceToHost); ASSERT_EQ(temp, max - 4); hipLaunchKernelGGL(( cudautils::bin_sub2ind_wrap), dim3(1),dim3(1), 0, 0, 1,1,1,78, sift_params, bin_index); hipMemcpy(&temp, bin_index, sizeof(int), hipMemcpyDeviceToHost); ASSERT_EQ(temp, max - 8); hipFree(bin_index); free(fv_centers); } TEST_F(SiftTest, Sub2Ind1Test) { cudautils::Sub2Ind sub2ind(3,4,5); ASSERT_EQ(0, sub2ind(0,0,0)); ASSERT_EQ(1, sub2ind(1,0,0)); ASSERT_EQ(7, sub2ind(1,2,0)); ASSERT_EQ(43, sub2ind(1,2,3)); } TEST_F(SiftTest, Sub2Ind2Test) { cudautils::Sub2Ind sub2ind(3,4,5,1,1,1); // 16 = sub2ind(1,1,1) ASSERT_EQ(16, sub2ind(0,0,0)); ASSERT_EQ(17, sub2ind(1,0,0)); ASSERT_EQ(23, sub2ind(1,2,0)); ASSERT_EQ(59, sub2ind(1,2,3)); } TEST_F(SiftTest, Sub2Ind3Test) { cudautils::Sub2Ind sub2ind(3,4,5); sub2ind.setBasePos(1,1,1); // 9 = sub2ind(1,1,1) ASSERT_EQ(16, sub2ind(0,0,0)); ASSERT_EQ(17, sub2ind(1,0,0)); ASSERT_EQ(23, sub2ind(1,2,0)); ASSERT_EQ(59, sub2ind(1,2,3)); } TEST_F(SiftTest, Ind2Sub1Test) { unsigned int x_stride = 4; unsigned int y_stride = 5; unsigned int x = 0; unsigned int y = 0; unsigned int z = 0; cudautils::ind2sub(x_stride, y_stride, 2, x, y, z); ASSERT_EQ(2, x); ASSERT_EQ(0, y); ASSERT_EQ(0, z); x = y = z = 0; cudautils::ind2sub(x_stride, y_stride, 7, x, y, z); ASSERT_EQ(3, x); ASSERT_EQ(1, y); ASSERT_EQ(0, z); x = y = z = 0; cudautils::ind2sub(x_stride, y_stride, 25, x, y, z); ASSERT_EQ(1, x); ASSERT_EQ(1, y); ASSERT_EQ(1, z); } TEST_F(SiftTest, get_grad_ori_vectorTest) { const unsigned int 
x_size = 3; const unsigned int y_size = 3; const unsigned int z_size = 3; long long N = x_size * y_size * z_size; const unsigned int keypoint_num = 1; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); sift_params.MagFactor = 1; sift_params.Tessel_thresh = 1; double *image, *h_image; // create img h_image = (double*) malloc(sizeof(double) * N); hipMalloc(&image, sizeof(double) * N); for (int i=0; i < N; i++) { h_image[i] = (double)rand() / RAND_MAX * 10.0; // logger_->info("image[{}]={}", i, h_image[i]); } hipMemcpy(image, h_image, N * sizeof(double), hipMemcpyHostToDevice); double* device_centers; // sift_params.fv_centers must be placed on device since array passed to cuda kernel // default fv_centers_len 80 * 3 (3D) = 240; cudaSafeCall(hipMalloc((void **) &device_centers, sizeof(double) * sift_params.fv_centers_len)); cudaSafeCall(hipMemcpy((void *) device_centers, (const void *) fv_centers, (size_t) sizeof(double) * sift_params.fv_centers_len, hipMemcpyHostToDevice)); long long idx = 13; // center of cube int r = 1; int c = 1; int t = 1; unsigned int x_stride = x_size; unsigned int y_stride = y_size; double* yy, *h_yy; hipMalloc(&yy, sizeof(double) * sift_params.nFaces); h_yy = (double*) malloc(sizeof(double) * sift_params.nFaces); uint16_t* ix; hipMalloc(&ix, sizeof(uint16_t) * sift_params.fv_centers_len); double vect[3] = {0.0, 0.0, 0.0}; double *dvect; hipMalloc(&dvect, sizeof(double) * 3); hipMemcpy(dvect, vect, sizeof(double) * 3, hipMemcpyHostToDevice); double* mag; hipMalloc(&mag, sizeof(double)); hipLaunchKernelGGL(( cudautils::get_grad_ori_vector_wrap), dim3(1),dim3(1), 0, 0, image, idx, x_stride, y_stride, r, c, t, dvect, yy, ix, sift_params, device_centers, mag); hipMemcpy(h_yy, yy, sizeof(double) * sift_params.nFaces, hipMemcpyDeviceToHost); // make sure yy's values are descending double max = h_yy[0]; for (int i=1; i < sift_params.nFaces; i++) { // logger_->info("yy[{}]={}", i, h_yy[i]); ASSERT_GE(max, h_yy[i]); max = h_yy[i]; //update } free(fv_centers); } TEST_F(SiftTest, SiftBoundaryTest) { const unsigned int x_size = 10; const unsigned int y_size = 10; const unsigned int z_size = 6; long long N = x_size * y_size * z_size; const unsigned int keypoint_num = 1; const unsigned int num_gpus = 2; const unsigned int num_streams = 1; const unsigned int x_sub_size = x_size; const unsigned int y_sub_size = y_size / num_gpus; const unsigned int dx = x_sub_size; const unsigned int dy = y_sub_size; const unsigned int dw = 0; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); /*// create img*/ /*double *image, *h_image; */ /*int8_t *map, *h_map;*/ /*h_image = (double*) malloc(sizeof(double) * N);*/ /*h_map = (int8_t*) malloc(sizeof(int8_t) * N);*/ /*hipMalloc(&image, sizeof(double) * N);*/ /*hipMalloc(&map, sizeof(int8_t) * N);*/ /*for (int i=0; i < N; i++) {*/ /*h_image[i] = i;*/ /*h_map[i] = 1;*/ /*}*/ // create img thrust::host_vector<double> image(N); thrust::generate(image.begin(), image.end(), rand); // create map thrust::host_vector<int8_t> map(N); thrust::fill_n(map.begin(), N, 1.0); // test a keypoint in the last pixel map[N - 1] = 0.0; //select for processing // investigate a boundary point between GPUs int x = 5; int y = 5; int z = 3; long long idx = x + x_size * y + x_size * y_size * z; map[idx] = 0.0; // select this point for processing /*hipMemcpy(image, h_image, N * sizeof(double), hipMemcpyHostToDevice);*/ /*hipMemcpy(map, 
h_map, N * sizeof(int8_t), hipMemcpyHostToDevice);*/ std::shared_ptr<cudautils::Sift> ni = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor(num_gpus, num_streams, ni); ni->setImage(image.data()); ni->setMap(map.data()); executor.run(); free(fv_centers); } TEST_F(SiftTest, SiftSimpleTest) { const unsigned int x_size = 10; const unsigned int y_size = 10; const unsigned int z_size = 6; const unsigned int keypoint_num = 1; const unsigned int num_gpus = 1; const unsigned int num_streams = 1; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int y_sub_size = y_size / num_gpus; const unsigned int dx = x_sub_size; const unsigned int dy = y_sub_size; const unsigned int dw = 0; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); // create img thrust::host_vector<double> image(image_size); thrust::generate(image.begin(), image.end(), rand); // create map thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); long long idx; for (int i=0; i < keypoint_num; i++) { // warning not evenly distributed across the image // chosen to roughly distribute across GPUs (y axis) idx = (x_size * rand()) % image_size; map[idx] = 0.0; // select this point for processing } std::shared_ptr<cudautils::Sift> ni = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor(num_gpus, num_streams, ni); ni->setImage(thrust::raw_pointer_cast(image.data())); ni->setMap(thrust::raw_pointer_cast(map.data())); executor.run(); free(fv_centers); } TEST_F(SiftTest, SiftSimple2Test) { const unsigned int x_size = 3; const unsigned int y_size = 3; const unsigned int z_size = 3; const unsigned int keypoint_num = 1; const unsigned int num_gpus = 1; const unsigned int num_streams = 1; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int y_sub_size = y_size / num_gpus; const unsigned int dx = x_sub_size; const unsigned int dy = y_sub_size; const unsigned int dw = 0; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); cudautils::Sub2Ind sub2ind(3,3,3); // create img thrust::host_vector<double> image(image_size); image[sub2ind(1,1,1)] = 1; // create map thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); map[sub2ind(1,1,1)] = 0.0; // select this point for processing std::shared_ptr<cudautils::Sift> ni = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor(num_gpus, num_streams, ni); ni->setImage(thrust::raw_pointer_cast(image.data())); ni->setMap(thrust::raw_pointer_cast(map.data())); executor.run(); cudautils::Keypoint_store keystore; ni->getKeystore(&keystore); std::vector<int> correct_idx = { 3, 11, 35, 71, 87, 111, 133, 141, 165, 199, 207, 231, 263, 271, 295, 326, 342, 358 }; for (int i=0; i < keystore.len; i++) { cudautils::Keypoint key = keystore.buf[i]; // printf("Keypoint:%d\n", i); for (int j=0; j < sift_params.descriptor_len; j++) { if (std::find(correct_idx.begin(), correct_idx.end(), j) != correct_idx.end()) { // printf("\t%d: %d\n", j, (int) 
key.ivec[j]); ASSERT_EQ(121, (int) key.ivec[j]); } else { ASSERT_EQ( 0, (int) key.ivec[j]); } } } free(fv_centers); } TEST_F(SiftTest, SiftSimple3Test) { const unsigned int x_size = 5; const unsigned int y_size = 5; const unsigned int z_size = 5; const unsigned int keypoint_num = 1; const unsigned int num_gpus = 1; const unsigned int num_streams = 1; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int y_sub_size = y_size / num_gpus; const unsigned int dx = x_sub_size; const unsigned int dy = y_sub_size; const unsigned int dw = 0; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); cudautils::Sub2Ind sub2ind(5,5,5); // create img thrust::host_vector<double> image(image_size); image[sub2ind(1,1,2)] = 1; image[sub2ind(1,2,2)] = 2; image[sub2ind(1,3,2)] = 3; image[sub2ind(2,1,2)] = 2; image[sub2ind(2,2,2)] = 5; // keypoint image[sub2ind(2,3,2)] = 1; image[sub2ind(3,1,2)] = 1; image[sub2ind(3,2,2)] = 3; image[sub2ind(3,3,2)] = 2; image[sub2ind(1,1,1)] = 0; image[sub2ind(1,2,1)] = 1; image[sub2ind(1,3,1)] = 2; image[sub2ind(2,1,1)] = 1; image[sub2ind(2,2,1)] = 3; image[sub2ind(2,3,1)] = 0; image[sub2ind(3,1,1)] = 0; image[sub2ind(3,2,1)] = 2; image[sub2ind(3,3,1)] = 1; // create map thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); map[sub2ind(2,2,2)] = 0.0; // select this point for processing std::shared_ptr<cudautils::Sift> ni = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor(num_gpus, num_streams, ni); ni->setImage(thrust::raw_pointer_cast(image.data())); ni->setMap(thrust::raw_pointer_cast(map.data())); executor.run(); cudautils::Keypoint_store keystore; ni->getKeystore(&keystore); std::vector<int> correct_idx = { 1, 2, 3, 9, 10, 11, 19, 33, 34, 35, 50, 51, 59, 68, 69, 70, 71, 84, 85, 86, 87, 108, 109, 110, 111, 127, 129, 132, 133, 137, 140, 141, 145, 153, 161, 164, 165, 181, 189, 194, 195, 198, 199, 202, 203, 206, 207, 211, 219, 223, 226, 227, 230, 231, 238, 247, 255, 259, 261, 263, 267, 269, 271, 277, 291, 293, 295, 311, 322, 324, 326, 332, 334, 338, 340, 342, 350, 354, 356, 358, 387, 393, 397, 409, 411, 423, 437, 448, 449, 456, 458, 460, 466, 472, 474, 500, 514, 515, 523, 567, 579, 614, 615, 638 }; std::vector<int> correct_val = { 17, 49, 100, 17, 86, 109, 40, 17, 49, 100, 37, 77, 45, 9, 26, 44, 97, 9, 26, 44, 97, 9, 26, 44, 109, 22, 37, 17, 50, 17, 17, 50, 51, 36, 17, 17, 86, 35, 67, 33, 16, 50, 50, 33, 63, 50, 109, 42, 21, 16, 33, 16, 50, 50, 15, 107, 21, 49, 17, 84, 49, 17, 109, 35, 49, 17, 84, 22, 49, 17, 84, 43, 22, 49, 17, 109, 68, 49, 17, 84, 59, 29, 30, 14, 16, 40, 30, 8, 8, 8, 17, 16, 43, 28, 48, 16, 7, 16, 20, 20, 16, 41, 21, 23 }; for (int i=0; i < keystore.len; i++) { cudautils::Keypoint key = keystore.buf[i]; // printf("Keypoint:%d\n", i); for (int j=0; j < sift_params.descriptor_len; j++) { auto it = std::find(correct_idx.begin(), correct_idx.end(), j); if (it != correct_idx.end()) { int pos = it - correct_idx.begin(); // printf("\t%d: %d\n", j, (int) key.ivec[j]); ASSERT_EQ(correct_val[pos], (int) key.ivec[j]); } else { ASSERT_EQ(0, (int) key.ivec[j]); } } } free(fv_centers); } TEST_F(SiftTest, SiftSimple4Test) { const unsigned int x_size = 20; const unsigned int y_size = 20; const unsigned int z_size = 20; const unsigned int keypoint_num = 1; const unsigned int num_gpus = 2; const 
unsigned int num_streams = 1; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int y_sub_size = y_size / num_gpus; const unsigned int dx = x_sub_size; const unsigned int dy = y_sub_size; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); const unsigned int orihist_radius = static_cast<unsigned int>(sift_params.xyScale * 3.0); const unsigned int xyiradius = static_cast<unsigned int>(1.414 * sift_params.xyScale * sift_params.MagFactor * (sift_params.IndexSize + 1) / 2.0); const unsigned int dw = (num_gpus == 1 ? 0 : ::max(orihist_radius, xyiradius) + 1); cudautils::Sub2Ind sub2ind(20,20,20); // create img thrust::host_vector<double> image(image_size); image[sub2ind( 9, 9,10)] = 1; image[sub2ind( 9,10,10)] = 2; image[sub2ind( 9,11,10)] = 3; image[sub2ind(10, 9,10)] = 2; image[sub2ind(10,10,10)] = 5; // keypoint image[sub2ind(10,11,10)] = 1; image[sub2ind(11, 9,10)] = 1; image[sub2ind(11,10,10)] = 3; image[sub2ind(11,11,10)] = 2; image[sub2ind( 9, 9, 9)] = 0; image[sub2ind( 9,10, 9)] = 1; image[sub2ind( 9,11, 9)] = 2; image[sub2ind(10, 9, 9)] = 1; image[sub2ind(10,10, 9)] = 3; image[sub2ind(10,11, 9)] = 0; image[sub2ind(11, 9, 9)] = 0; image[sub2ind(11,10, 9)] = 2; image[sub2ind(11,11, 9)] = 1; // create map thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); map[sub2ind(10,10,10)] = 0.0; // select this point for processing std::shared_ptr<cudautils::Sift> ni = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor(num_gpus, num_streams, ni); ni->setImage(thrust::raw_pointer_cast(image.data())); ni->setMap(thrust::raw_pointer_cast(map.data())); executor.run(); cudautils::Keypoint_store keystore; ni->getKeystore(&keystore); std::vector<int> correct_idx = { 1, 2, 3, 9, 10, 11, 19, 33, 34, 35, 50, 51, 59, 68, 69, 70, 71, 84, 85, 86, 87, 108, 109, 110, 111, 127, 129, 132, 133, 137, 140, 141, 145, 153, 161, 164, 165, 181, 189, 194, 195, 198, 199, 202, 203, 206, 207, 211, 219, 223, 226, 227, 230, 231, 238, 247, 255, 259, 261, 263, 267, 269, 271, 277, 291, 293, 295, 311, 322, 324, 326, 332, 334, 338, 340, 342, 350, 354, 356, 358, 387, 393, 397, 409, 411, 423, 437, 448, 449, 456, 458, 460, 466, 472, 474, 500, 514, 515, 523, 567, 579, 614, 615, 638 }; std::vector<int> correct_val = { 11, 33, 66, 11, 82, 91, 53, 11, 33, 66, 49, 102, 59, 11, 35, 58, 113, 11, 35, 58, 113, 11, 35, 58, 113, 29, 38, 11, 33, 11, 11, 33, 68, 48, 11, 11, 80, 47, 88, 22, 11, 33, 33, 22, 72, 33, 113, 55, 28, 21, 22, 11, 33, 33, 19, 113, 28, 33, 11, 56, 33, 11, 113, 46, 33, 11, 56, 30, 33, 11, 55, 57, 29, 33, 11, 113, 89, 33, 11, 55, 78, 38, 40, 19, 21, 53, 40, 11, 11, 11, 23, 21, 58, 38, 64, 21, 10, 21, 27, 27, 21, 55, 28, 31 }; for (int i=0; i < keystore.len; i++) { cudautils::Keypoint key = keystore.buf[i]; // printf("Keypoint:%d\n", i); for (int j=0; j < sift_params.descriptor_len; j++) { auto it = std::find(correct_idx.begin(), correct_idx.end(), j); if (it != correct_idx.end()) { // printf("\t%d: %d\n", j, (int) key.ivec[j]); int pos = it - correct_idx.begin(); ASSERT_EQ(correct_val[pos], (int) key.ivec[j]); } else { ASSERT_EQ(0, (int) key.ivec[j]); } } } free(fv_centers); } TEST_F(SiftTest, SiftSimple5Test) { const unsigned int x_size = 100; const unsigned int y_size = 100; const unsigned int z_size = 100; const unsigned int keypoint_num = 4; 
unsigned int num_gpus = 1; unsigned int num_streams = 1; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int dx = x_sub_size; unsigned int y_sub_size = y_size / num_gpus; unsigned int dy = y_sub_size; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); const unsigned int orihist_radius = static_cast<unsigned int>(sift_params.xyScale * 3.0); const unsigned int xyiradius = static_cast<unsigned int>(1.414 * sift_params.xyScale * sift_params.MagFactor * (sift_params.IndexSize + 1) / 2.0); unsigned int dw = (num_gpus == 1 ? 0 : ::max(orihist_radius, xyiradius) + 1); cudautils::Sub2Ind sub2ind(100,100,100); // create img thrust::host_vector<double> image(image_size); thrust::generate(image.begin(), image.end(), rand); // create map thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); // select this point for processing map[sub2ind(50,50,50)] = 0.0; map[sub2ind(20,20,20)] = 0.0; map[sub2ind(30,70,30)] = 0.0; map[sub2ind(80,80,80)] = 0.0; // pattern 1 std::shared_ptr<cudautils::Sift> ni1 = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor1(num_gpus, num_streams, ni1); ni1->setImage(thrust::raw_pointer_cast(image.data())); ni1->setMap(thrust::raw_pointer_cast(map.data())); executor1.run(); cudautils::Keypoint_store keystore1; ni1->getKeystore(&keystore1); // pattern 2 num_gpus = 2; num_streams = 2; y_sub_size = y_size / num_gpus; dy = 20; dw = (num_gpus == 1 ? 0 : ::max(orihist_radius, xyiradius) + 1); std::shared_ptr<cudautils::Sift> ni2 = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor2(num_gpus, num_streams, ni2); ni2->setImage(thrust::raw_pointer_cast(image.data())); ni2->setMap(thrust::raw_pointer_cast(map.data())); executor2.run(); cudautils::Keypoint_store keystore2; ni2->getKeystore(&keystore2); ASSERT_EQ(keystore1.len, keystore2.len); for (int i=0; i < keystore1.len; i++) { logger_->info("key[{}]", i); cudautils::Keypoint key1 = keystore1.buf[i]; cudautils::Keypoint key2; bool is_found = false; for (int k=0; k < keystore2.len; k++) { key2 = keystore2.buf[k]; if (key1.x == key2.x && key1.y == key2.y && key1.z == key2.z) { is_found = true; break; } } if (! 
is_found) { FAIL(); } ASSERT_EQ(key1.xyScale, key2.xyScale); ASSERT_EQ(key1.tScale, key2.tScale); for (int j=0; j < sift_params.descriptor_len; j++) { // logger_->info("key[{}] ivec[{}] {}, {}", i, j, key1.ivec[j], key2.ivec[j]); ASSERT_EQ((int) key1.ivec[j], (int) key2.ivec[j]); } } free(fv_centers); } TEST_F(SiftTest, SiftSimple7Test) { int hw_num_gpus = cudautils::get_gpu_num(); if (hw_num_gpus < 3) { printf("*** WARNING: This test case requires more than 3 gpus.\n"); return; } const unsigned int x_size = 1024; const unsigned int y_size = 1024; const unsigned int z_size = 126; const unsigned int keypoint_num = 100; unsigned int num_gpus = 1; unsigned int num_streams = 1; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int dx = x_sub_size; unsigned int y_sub_size = y_size / num_gpus; unsigned int dy = y_sub_size; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); const unsigned int orihist_radius = static_cast<unsigned int>(sift_params.xyScale * 3.0); const unsigned int xyiradius = static_cast<unsigned int>(1.414 * sift_params.xyScale * sift_params.MagFactor * (sift_params.IndexSize + 1) / 2.0); unsigned int dw = (num_gpus == 1 ? 0 : ::max(orihist_radius, xyiradius) + 1); cudautils::Sub2Ind sub2ind(x_size, y_size, z_size); // create img logger_->info("start to generate image"); thrust::host_vector<double> image(image_size); thrust::generate(image.begin(), image.end(), rand); logger_->info("end of image generation"); // create map logger_->info("start to generate map"); thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); logger_->info("end of map generation"); // select this point for processing logger_->info("start to generate keypoints"); thrust::minstd_rand rng; thrust::uniform_int_distribution<int> dist_x(0,x_size-1); thrust::uniform_int_distribution<int> dist_y(0,y_size-1); thrust::uniform_int_distribution<int> dist_z(0,z_size-1); for (int i = 0; i < keypoint_num; i++) { int key_x = dist_x(rng); int key_y = dist_y(rng); int key_z = dist_z(rng); map[sub2ind(key_x,key_y,key_z)] = 0.0; } logger_->info("end of keypoints generation"); // pattern 1 std::shared_ptr<cudautils::Sift> ni1 = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor1(num_gpus, num_streams, ni1); ni1->setImage(thrust::raw_pointer_cast(image.data())); ni1->setMap(thrust::raw_pointer_cast(map.data())); executor1.run(); cudautils::Keypoint_store keystore1; ni1->getKeystore(&keystore1); // pattern 2 num_gpus = 3; num_streams = 20; y_sub_size = (y_size + num_gpus - 1) / num_gpus; dy = 256; dw = (num_gpus == 1 ? 
0 : ::max(orihist_radius, xyiradius) + 1); std::shared_ptr<cudautils::Sift> ni2 = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor2(num_gpus, num_streams, ni2); ni2->setImage(thrust::raw_pointer_cast(image.data())); ni2->setMap(thrust::raw_pointer_cast(map.data())); executor2.run(); cudautils::Keypoint_store keystore2; ni2->getKeystore(&keystore2); ASSERT_EQ(keystore1.len, keystore2.len); for (int i=0; i < keystore1.len; i++) { // logger_->info("key[{}]", i); cudautils::Keypoint key1 = keystore1.buf[i]; cudautils::Keypoint key2; bool is_found = false; for (int k=0; k < keystore2.len; k++) { key2 = keystore2.buf[k]; if (key1.x == key2.x && key1.y == key2.y && key1.z == key2.z) { is_found = true; break; } } if (! is_found) { FAIL(); } ASSERT_EQ(key1.xyScale, key2.xyScale); ASSERT_EQ(key1.tScale, key2.tScale); for (int j=0; j < sift_params.descriptor_len; j++) { // logger_->info("key[{}] ivec[{}] {}, {}", i, j, key1.ivec[j], key2.ivec[j]); ASSERT_EQ((int) key1.ivec[j], (int) key2.ivec[j]); } } free(fv_centers); } TEST_F(SiftTest, SiftSimple6Test) { const unsigned int x_size = 2000; const unsigned int y_size = 2000; const unsigned int z_size = 100; const unsigned int keypoint_num = 100; unsigned int num_gpus = 1; unsigned int num_streams = 1; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int dx = x_sub_size; unsigned int y_sub_size = y_size / num_gpus; unsigned int dy = y_sub_size; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); const unsigned int orihist_radius = static_cast<unsigned int>(sift_params.xyScale * 3.0); const unsigned int xyiradius = static_cast<unsigned int>(1.414 * sift_params.xyScale * sift_params.MagFactor * (sift_params.IndexSize + 1) / 2.0); unsigned int dw = (num_gpus == 1 ? 
0 : ::max(orihist_radius, xyiradius) + 1); cudautils::Sub2Ind sub2ind(x_size, y_size, z_size); // create img logger_->info("start to generate image"); thrust::host_vector<double> image(image_size); thrust::generate(image.begin(), image.end(), rand); logger_->info("end of image generation"); // create map logger_->info("start to generate map"); thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); logger_->info("end of map generation"); // select this point for processing // thrust::host_vector<int> key_x(keypoint_num); // thrust::host_vector<int> key_y(keypoint_num); // thrust::host_vector<int> key_z(keypoint_num); logger_->info("start to generate keypoints"); thrust::minstd_rand rng; thrust::uniform_int_distribution<int> dist_x(0,x_size-1); thrust::uniform_int_distribution<int> dist_y(0,y_size-1); thrust::uniform_int_distribution<int> dist_z(0,z_size-1); for (int i = 0; i < keypoint_num; i++) { int key_x = dist_x(rng); int key_y = dist_y(rng); int key_z = dist_z(rng); map[sub2ind(key_x,key_y,key_z)] = 0.0; } logger_->info("end of keypoints generation"); // pattern 1 std::shared_ptr<cudautils::Sift> ni1 = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor1(num_gpus, num_streams, ni1); ni1->setImage(thrust::raw_pointer_cast(image.data())); ni1->setMap(thrust::raw_pointer_cast(map.data())); executor1.run(); cudautils::Keypoint_store keystore1; ni1->getKeystore(&keystore1); // pattern 2 num_gpus = 2; num_streams = 2; y_sub_size = y_size / num_gpus; dy = 20; dw = (num_gpus == 1 ? 0 : ::max(orihist_radius, xyiradius) + 1); std::shared_ptr<cudautils::Sift> ni2 = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor2(num_gpus, num_streams, ni2); ni2->setImage(thrust::raw_pointer_cast(image.data())); ni2->setMap(thrust::raw_pointer_cast(map.data())); executor2.run(); cudautils::Keypoint_store keystore2; ni2->getKeystore(&keystore2); ASSERT_EQ(keystore1.len, keystore2.len); for (int i=0; i < keystore1.len; i++) { // logger_->info("key[{}]", i); cudautils::Keypoint key1 = keystore1.buf[i]; cudautils::Keypoint key2; bool is_found = false; for (int k=0; k < keystore2.len; k++) { key2 = keystore2.buf[k]; if (key1.x == key2.x && key1.y == key2.y && key1.z == key2.z) { is_found = true; break; } } if (! 
is_found) { FAIL(); } ASSERT_EQ(key1.xyScale, key2.xyScale); ASSERT_EQ(key1.tScale, key2.tScale); for (int j=0; j < sift_params.descriptor_len; j++) { // logger_->info("key[{}] ivec[{}] {}, {}", i, j, key1.ivec[j], key2.ivec[j]); ASSERT_EQ((int) key1.ivec[j], (int) key2.ivec[j]); } } free(fv_centers); } // Check output with saved binary TEST_F(SiftTest, CheckBinaryTest) { std::string in_image_filename1("test_img.bin"); std::string in_map_filename2("test_map.bin"); std::string out_filename("gtest_output.bin"); // Compares to test_output.bin file in tests/ unsigned int x_size, y_size, z_size, x_size1, y_size1, z_size1; std::ifstream fin1(in_image_filename1, std::ios::binary); if (fin1.is_open()) { fin1.read((char*)&x_size, sizeof(unsigned int)); fin1.read((char*)&y_size, sizeof(unsigned int)); fin1.read((char*)&z_size, sizeof(unsigned int)); } else { throw std::invalid_argument( "Unable to open or find file: `test_img.bin` in current directory"); } std::ifstream fin2(in_map_filename2, std::ios::binary); if (fin2.is_open()) { fin2.read((char*)&x_size1, sizeof(unsigned int)); fin2.read((char*)&y_size1, sizeof(unsigned int)); fin2.read((char*)&z_size1, sizeof(unsigned int)); } else { throw std::invalid_argument( "Unable to open or find file: `test_map.bin` in current directory"); } if (x_size != x_size1 || y_size != y_size1 || z_size != z_size1) { logger_->error("the dimension of image and map is not the same. image({},{},{}), map({},{},{})", x_size, y_size, z_size, x_size1, y_size1, z_size1); fin1.close(); fin2.close(); /*ASSERT_TRUE(false);*/ throw std::invalid_argument("Dimension of image and map is not the same"); } std::vector<double> in_image(x_size * y_size * z_size); std::vector<int8_t> in_map (x_size * y_size * z_size); fin1.read((char*)in_image.data(), x_size * y_size * z_size * sizeof(double)); fin2.read((char*)in_map.data(), x_size * y_size * z_size * sizeof(int8_t)); fin1.close(); fin2.close(); cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, 0); int stream_num = 20; int x_substream_stride = 256; int y_substream_stride = 256; int num_gpus = cudautils::get_gpu_num(); logger_->info("# of gpus = {}", num_gpus); logger_->info("# of streams = {}", stream_num); const unsigned int x_sub_size = x_size; const unsigned int y_sub_size = y_size / num_gpus; const unsigned int dw = 0; const unsigned int dx = min(x_substream_stride, x_sub_size); const unsigned int dy = min(y_substream_stride, y_sub_size); std::shared_ptr<cudautils::Sift> ni = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, stream_num, sift_params, fv_centers); cudautils::CudaTaskExecutor executor(num_gpus, stream_num, ni); ni->setImage(thrust::raw_pointer_cast(in_image.data())); ni->setMap(thrust::raw_pointer_cast(in_map.data())); executor.run(); cudautils::Keypoint_store keystore; ni->getKeystore(&keystore); FILE* pFile = fopen(out_filename.c_str(), "w"); if (pFile != NULL) { // print keystore for (int i=0; i < keystore.len; i++) { cudautils::Keypoint key = keystore.buf[i]; fprintf(pFile, "Keypoint:%d\n", i); for (int j=0; j < sift_params.descriptor_len; j++) { fprintf(pFile, "\t%d: %d\n", j, (int) key.ivec[j]); } } fclose(pFile); } else { throw std::invalid_argument( "Unable to open output file\nMake sure `test_output.bin` is in current dir: tests/"); } ASSERT_EQ(system("diff test_output.bin gtest_output.bin"), 0); free(fv_centers); } // Disabled to speed up quick test runs TEST_F(SiftTest, SiftFullImageTest) { 
const unsigned int x_size = 1024; const unsigned int y_size = 1024; const unsigned int z_size = 126; const unsigned int keypoint_num = 1; const unsigned int num_gpus = 2; const unsigned int num_streams = 20; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int y_sub_size = y_size / num_gpus; const unsigned int dx = 256; const unsigned int dy = 256; const unsigned int dw = 0; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); // create img thrust::host_vector<double> image(image_size); thrust::generate(image.begin(), image.end(), rand); // create map thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); long long idx; for (int i=0; i < keypoint_num; i++) { // warning not evenly distributed across the image // chosen to roughly distribute across GPUs idx = (x_size * rand()) % image_size; map[idx] = 0.0; // select this point for processing } std::shared_ptr<cudautils::Sift> ni = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor(num_gpus, num_streams, ni); ni->setImage(thrust::raw_pointer_cast(image.data())); ni->setMap(thrust::raw_pointer_cast(map.data())); executor.run(); cudautils::Keypoint_store keystore; ni->getKeystore(&keystore); free(fv_centers); } }
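The Sub2Ind and Ind2Sub tests above pin down the linearization idx = x + x_size*y + x_size*y_size*z, with an optional base offset. Below is a minimal host-only sketch that reproduces the asserted values independently of the cudautils classes; the names Sub2IndSketch and ind2sub_sketch are illustrative, not part of the library.

#include <cassert>

// idx = base + x + x_size*y + x_size*y_size*z
struct Sub2IndSketch {
    unsigned x_size, y_size, z_size;
    unsigned long long base;
    unsigned long long operator()(unsigned x, unsigned y, unsigned z) const {
        return base + x + (unsigned long long)x_size * y +
               (unsigned long long)x_size * y_size * z;
    }
};

// Inverse mapping, mirroring the x/y strides passed to cudautils::ind2sub.
static void ind2sub_sketch(unsigned x_stride, unsigned y_stride,
                           unsigned long long idx,
                           unsigned& x, unsigned& y, unsigned& z) {
    x = (unsigned)(idx % x_stride);
    y = (unsigned)((idx / x_stride) % y_stride);
    z = (unsigned)(idx / ((unsigned long long)x_stride * y_stride));
}

int main() {
    Sub2IndSketch s{3, 4, 5, 0};
    assert(s(1, 2, 3) == 43);                 // matches Sub2Ind1Test

    Sub2IndSketch sb{3, 4, 5, s(1, 1, 1)};    // base offset 16
    assert(sb(1, 2, 3) == 59);                // matches Sub2Ind2/3Test

    unsigned x, y, z;
    ind2sub_sketch(4, 5, 25, x, y, z);        // matches Ind2Sub1Test
    assert(x == 1 && y == 1 && z == 1);
    return 0;
}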
1321b266327c761d15448257798c5fd63cdb5d2e.cu
#include "gtest/gtest.h" #include "sift.h" #include "cuda_task_executor.h" #include "gpudevice.h" #include <vector> #include <iterator> #include <algorithm> #include <cublas_v2.h> #include <stdexcept> #include "spdlog/spdlog.h" #include <iostream> #include <vector> #include <string> #include <memory> #include <cstdint> #include <random> #include <thrust/host_vector.h> #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/uniform_int_distribution.h> namespace { class SiftTest : public ::testing::Test { protected: std::shared_ptr<spdlog::logger> logger_; SiftTest() { logger_ = spdlog::get("console"); if (! logger_) { logger_ = spdlog::stdout_logger_mt("console"); } spdlog::set_level(spdlog::level::trace); //spdlog::set_level(spdlog::level::info); } virtual ~SiftTest() { } template <class T> void print_arr(T * arr, int N) { printf("print_arr\n"); for (int i=0; i < N; i++) { logger_->debug("\t[{}]={}", i, arr[i]); /*printf("\t[%d]=%.5f\n", i, arr[i]);*/ } } }; TEST_F(SiftTest, DotProductTest) { int cols = 3; double first[3] = {1,2,3}; double *dfirst; cudaMalloc(&dfirst, sizeof(double) * cols); cudaMemcpy(dfirst, first, sizeof(double) * cols, cudaMemcpyHostToDevice); double second[3] = {5, .5, 2}; double* dsecond; cudaMalloc(&dsecond, sizeof(double) * cols); cudaMemcpy(dsecond, second, sizeof(double) * cols, cudaMemcpyHostToDevice); double out[1] = {0}; double *dout; cudaMalloc(&dout, sizeof(double)); cudautils::dot_product_wrap<<<1,1>>>(dfirst, dsecond, dout, 1, cols); cudaMemcpy(out, dout, sizeof(double), cudaMemcpyDeviceToHost); ASSERT_EQ(*out, 12); // 3x3 matrix int rows = 3; double matrix[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9}; double *dmatrix; cudaMalloc(&dmatrix, sizeof(double) * 9); cudaMemcpy(dmatrix, matrix, sizeof(double) * 9, cudaMemcpyHostToDevice); double vec[3] = {3, 1, 2}; double *dvec; cudaMalloc(&dvec, sizeof(double) * 3); cudaMemcpy(dvec, vec, sizeof(double) * 3, cudaMemcpyHostToDevice); double* dout_arr; cudaMalloc(&dout_arr, rows * sizeof(double)); cudautils::dot_product_wrap<<<1,1>>>(dmatrix, dvec, dout_arr, rows, cols); double* out_arr = (double*) malloc(rows * sizeof(double)); cudaMemcpy(out_arr, dout_arr, sizeof(double) * rows, cudaMemcpyDeviceToHost); double answer[3] = {11, 29, 47}; for (int i=0; i < rows; i++) ASSERT_EQ(out_arr[i], answer[i]); cudaFree(dfirst); cudaFree(dsecond); cudaFree(dout); cudaFree(dmatrix); cudaFree(dvec); cudaFree(dout_arr); free(out_arr); } TEST_F(SiftTest, GetBinIdxTest) { cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, 0, 0, 0, 0); const int IndexSize = sift_params.IndexSize; double xySpacing = (double) sift_params.xyScale * sift_params.MagFactor; double tSpacing = (double) sift_params.tScale * sift_params.MagFactor; int xyiradius = round(1.414 * xySpacing * (sift_params.IndexSize + 1) / 2.0); int tiradius = round(1.414 * tSpacing * (sift_params.IndexSize + 1) / 2.0); // Surrounding radius of pixels are binned for computation // according to sift_params.IndexSize int *i_bin, *j_bin, *k_bin; int hi, hj, hk; cudaMalloc(&i_bin, sizeof(int)); cudaMalloc(&j_bin, sizeof(int)); cudaMalloc(&k_bin, sizeof(int)); for (int i = -xyiradius; i <= xyiradius; i++) { for (int j = -xyiradius; j <= xyiradius; j++) { for (int k = -tiradius; k <= tiradius; k++) { // Find bin idx cudautils::get_bin_idx_wrap<<<1,1>>>(i, xyiradius, IndexSize, i_bin); cudautils::get_bin_idx_wrap<<<1,1>>>(j, xyiradius, IndexSize, j_bin); cudautils::get_bin_idx_wrap<<<1,1>>>(k, tiradius, IndexSize, k_bin); cudaMemcpy(&hi, i_bin, 
sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&hj, j_bin, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&hk, k_bin, sizeof(int), cudaMemcpyDeviceToHost); ASSERT_GE(hi, 0); ASSERT_LT(hi, sift_params.IndexSize); ASSERT_GE(hj, 0); ASSERT_LT(hj, sift_params.IndexSize); ASSERT_GE(hk, 0); ASSERT_LT(hk, sift_params.IndexSize); } } } cudaFree(i_bin); cudaFree(j_bin); cudaFree(k_bin); free(fv_centers); } TEST_F(SiftTest, BinSub2Ind1Test) { cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, 0, 0, 0, 0); int* bin_index; cudaMalloc(&bin_index, sizeof(int)); int temp; for (int i=0; i < sift_params.IndexSize; i++) { for (int j=0; j < sift_params.IndexSize; j++) { for (int k=0; k < sift_params.IndexSize; k++) { for (uint16_t ixi=0; ixi < sift_params.nFaces; ixi++) { cudautils::bin_sub2ind_wrap<<<1,1>>>(i, j, k, ixi, sift_params, bin_index); cudaMemcpy(&temp, bin_index, sizeof(int), cudaMemcpyDeviceToHost); ASSERT_LT(temp, sift_params.descriptor_len); } } } } int max = sift_params.descriptor_len - 1; cudautils::bin_sub2ind_wrap<<<1,1>>>(1,1,1,79, sift_params, bin_index); cudaMemcpy(&temp, bin_index, sizeof(int), cudaMemcpyDeviceToHost); ASSERT_EQ(temp, max); cudautils::bin_sub2ind_wrap<<<1,1>>>(0,1,1,79, sift_params, bin_index); cudaMemcpy(&temp, bin_index, sizeof(int), cudaMemcpyDeviceToHost); ASSERT_EQ(temp, max - 1); cudautils::bin_sub2ind_wrap<<<1,1>>>(1,0,1,79, sift_params, bin_index); cudaMemcpy(&temp, bin_index, sizeof(int), cudaMemcpyDeviceToHost); ASSERT_EQ(temp, max - 2); cudautils::bin_sub2ind_wrap<<<1,1>>>(1,1,0,79, sift_params, bin_index); cudaMemcpy(&temp, bin_index, sizeof(int), cudaMemcpyDeviceToHost); ASSERT_EQ(temp, max - 4); cudautils::bin_sub2ind_wrap<<<1,1>>>(1,1,1,78, sift_params, bin_index); cudaMemcpy(&temp, bin_index, sizeof(int), cudaMemcpyDeviceToHost); ASSERT_EQ(temp, max - 8); cudaFree(bin_index); free(fv_centers); } TEST_F(SiftTest, Sub2Ind1Test) { cudautils::Sub2Ind sub2ind(3,4,5); ASSERT_EQ(0, sub2ind(0,0,0)); ASSERT_EQ(1, sub2ind(1,0,0)); ASSERT_EQ(7, sub2ind(1,2,0)); ASSERT_EQ(43, sub2ind(1,2,3)); } TEST_F(SiftTest, Sub2Ind2Test) { cudautils::Sub2Ind sub2ind(3,4,5,1,1,1); // 16 = sub2ind(1,1,1) ASSERT_EQ(16, sub2ind(0,0,0)); ASSERT_EQ(17, sub2ind(1,0,0)); ASSERT_EQ(23, sub2ind(1,2,0)); ASSERT_EQ(59, sub2ind(1,2,3)); } TEST_F(SiftTest, Sub2Ind3Test) { cudautils::Sub2Ind sub2ind(3,4,5); sub2ind.setBasePos(1,1,1); // 9 = sub2ind(1,1,1) ASSERT_EQ(16, sub2ind(0,0,0)); ASSERT_EQ(17, sub2ind(1,0,0)); ASSERT_EQ(23, sub2ind(1,2,0)); ASSERT_EQ(59, sub2ind(1,2,3)); } TEST_F(SiftTest, Ind2Sub1Test) { unsigned int x_stride = 4; unsigned int y_stride = 5; unsigned int x = 0; unsigned int y = 0; unsigned int z = 0; cudautils::ind2sub(x_stride, y_stride, 2, x, y, z); ASSERT_EQ(2, x); ASSERT_EQ(0, y); ASSERT_EQ(0, z); x = y = z = 0; cudautils::ind2sub(x_stride, y_stride, 7, x, y, z); ASSERT_EQ(3, x); ASSERT_EQ(1, y); ASSERT_EQ(0, z); x = y = z = 0; cudautils::ind2sub(x_stride, y_stride, 25, x, y, z); ASSERT_EQ(1, x); ASSERT_EQ(1, y); ASSERT_EQ(1, z); } TEST_F(SiftTest, get_grad_ori_vectorTest) { const unsigned int x_size = 3; const unsigned int y_size = 3; const unsigned int z_size = 3; long long N = x_size * y_size * z_size; const unsigned int keypoint_num = 1; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); sift_params.MagFactor = 1; sift_params.Tessel_thresh = 1; double *image, *h_image; // create img h_image = (double*) malloc(sizeof(double) * N); cudaMalloc(&image, 
sizeof(double) * N); for (int i=0; i < N; i++) { h_image[i] = (double)rand() / RAND_MAX * 10.0; // logger_->info("image[{}]={}", i, h_image[i]); } cudaMemcpy(image, h_image, N * sizeof(double), cudaMemcpyHostToDevice); double* device_centers; // sift_params.fv_centers must be placed on device since array passed to cuda kernel // default fv_centers_len 80 * 3 (3D) = 240; cudaSafeCall(cudaMalloc((void **) &device_centers, sizeof(double) * sift_params.fv_centers_len)); cudaSafeCall(cudaMemcpy((void *) device_centers, (const void *) fv_centers, (size_t) sizeof(double) * sift_params.fv_centers_len, cudaMemcpyHostToDevice)); long long idx = 13; // center of cube int r = 1; int c = 1; int t = 1; unsigned int x_stride = x_size; unsigned int y_stride = y_size; double* yy, *h_yy; cudaMalloc(&yy, sizeof(double) * sift_params.nFaces); h_yy = (double*) malloc(sizeof(double) * sift_params.nFaces); uint16_t* ix; cudaMalloc(&ix, sizeof(uint16_t) * sift_params.fv_centers_len); double vect[3] = {0.0, 0.0, 0.0}; double *dvect; cudaMalloc(&dvect, sizeof(double) * 3); cudaMemcpy(dvect, vect, sizeof(double) * 3, cudaMemcpyHostToDevice); double* mag; cudaMalloc(&mag, sizeof(double)); cudautils::get_grad_ori_vector_wrap<<<1,1>>>(image, idx, x_stride, y_stride, r, c, t, dvect, yy, ix, sift_params, device_centers, mag); cudaMemcpy(h_yy, yy, sizeof(double) * sift_params.nFaces, cudaMemcpyDeviceToHost); // make sure yy's values are descending double max = h_yy[0]; for (int i=1; i < sift_params.nFaces; i++) { // logger_->info("yy[{}]={}", i, h_yy[i]); ASSERT_GE(max, h_yy[i]); max = h_yy[i]; //update } free(fv_centers); } TEST_F(SiftTest, SiftBoundaryTest) { const unsigned int x_size = 10; const unsigned int y_size = 10; const unsigned int z_size = 6; long long N = x_size * y_size * z_size; const unsigned int keypoint_num = 1; const unsigned int num_gpus = 2; const unsigned int num_streams = 1; const unsigned int x_sub_size = x_size; const unsigned int y_sub_size = y_size / num_gpus; const unsigned int dx = x_sub_size; const unsigned int dy = y_sub_size; const unsigned int dw = 0; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); /*// create img*/ /*double *image, *h_image; */ /*int8_t *map, *h_map;*/ /*h_image = (double*) malloc(sizeof(double) * N);*/ /*h_map = (int8_t*) malloc(sizeof(int8_t) * N);*/ /*cudaMalloc(&image, sizeof(double) * N);*/ /*cudaMalloc(&map, sizeof(int8_t) * N);*/ /*for (int i=0; i < N; i++) {*/ /*h_image[i] = i;*/ /*h_map[i] = 1;*/ /*}*/ // create img thrust::host_vector<double> image(N); thrust::generate(image.begin(), image.end(), rand); // create map thrust::host_vector<int8_t> map(N); thrust::fill_n(map.begin(), N, 1.0); // test a keypoint in the last pixel map[N - 1] = 0.0; //select for processing // investigate a boundary point between GPUs int x = 5; int y = 5; int z = 3; long long idx = x + x_size * y + x_size * y_size * z; map[idx] = 0.0; // select this point for processing /*cudaMemcpy(image, h_image, N * sizeof(double), cudaMemcpyHostToDevice);*/ /*cudaMemcpy(map, h_map, N * sizeof(int8_t), cudaMemcpyHostToDevice);*/ std::shared_ptr<cudautils::Sift> ni = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor(num_gpus, num_streams, ni); ni->setImage(image.data()); ni->setMap(map.data()); executor.run(); free(fv_centers); } TEST_F(SiftTest, SiftSimpleTest) { const unsigned int x_size = 10; 
const unsigned int y_size = 10; const unsigned int z_size = 6; const unsigned int keypoint_num = 1; const unsigned int num_gpus = 1; const unsigned int num_streams = 1; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int y_sub_size = y_size / num_gpus; const unsigned int dx = x_sub_size; const unsigned int dy = y_sub_size; const unsigned int dw = 0; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); // create img thrust::host_vector<double> image(image_size); thrust::generate(image.begin(), image.end(), rand); // create map thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); long long idx; for (int i=0; i < keypoint_num; i++) { // warning not evenly distributed across the image // chosen to roughly distribute across GPUs (y axis) idx = (x_size * rand()) % image_size; map[idx] = 0.0; // select this point for processing } std::shared_ptr<cudautils::Sift> ni = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor(num_gpus, num_streams, ni); ni->setImage(thrust::raw_pointer_cast(image.data())); ni->setMap(thrust::raw_pointer_cast(map.data())); executor.run(); free(fv_centers); } TEST_F(SiftTest, SiftSimple2Test) { const unsigned int x_size = 3; const unsigned int y_size = 3; const unsigned int z_size = 3; const unsigned int keypoint_num = 1; const unsigned int num_gpus = 1; const unsigned int num_streams = 1; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int y_sub_size = y_size / num_gpus; const unsigned int dx = x_sub_size; const unsigned int dy = y_sub_size; const unsigned int dw = 0; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); cudautils::Sub2Ind sub2ind(3,3,3); // create img thrust::host_vector<double> image(image_size); image[sub2ind(1,1,1)] = 1; // create map thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); map[sub2ind(1,1,1)] = 0.0; // select this point for processing std::shared_ptr<cudautils::Sift> ni = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor(num_gpus, num_streams, ni); ni->setImage(thrust::raw_pointer_cast(image.data())); ni->setMap(thrust::raw_pointer_cast(map.data())); executor.run(); cudautils::Keypoint_store keystore; ni->getKeystore(&keystore); std::vector<int> correct_idx = { 3, 11, 35, 71, 87, 111, 133, 141, 165, 199, 207, 231, 263, 271, 295, 326, 342, 358 }; for (int i=0; i < keystore.len; i++) { cudautils::Keypoint key = keystore.buf[i]; // printf("Keypoint:%d\n", i); for (int j=0; j < sift_params.descriptor_len; j++) { if (std::find(correct_idx.begin(), correct_idx.end(), j) != correct_idx.end()) { // printf("\t%d: %d\n", j, (int) key.ivec[j]); ASSERT_EQ(121, (int) key.ivec[j]); } else { ASSERT_EQ( 0, (int) key.ivec[j]); } } } free(fv_centers); } TEST_F(SiftTest, SiftSimple3Test) { const unsigned int x_size = 5; const unsigned int y_size = 5; const unsigned int z_size = 5; const unsigned int keypoint_num = 1; const unsigned int num_gpus = 1; const unsigned int num_streams = 1; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int 
y_sub_size = y_size / num_gpus; const unsigned int dx = x_sub_size; const unsigned int dy = y_sub_size; const unsigned int dw = 0; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); cudautils::Sub2Ind sub2ind(5,5,5); // create img thrust::host_vector<double> image(image_size); image[sub2ind(1,1,2)] = 1; image[sub2ind(1,2,2)] = 2; image[sub2ind(1,3,2)] = 3; image[sub2ind(2,1,2)] = 2; image[sub2ind(2,2,2)] = 5; // keypoint image[sub2ind(2,3,2)] = 1; image[sub2ind(3,1,2)] = 1; image[sub2ind(3,2,2)] = 3; image[sub2ind(3,3,2)] = 2; image[sub2ind(1,1,1)] = 0; image[sub2ind(1,2,1)] = 1; image[sub2ind(1,3,1)] = 2; image[sub2ind(2,1,1)] = 1; image[sub2ind(2,2,1)] = 3; image[sub2ind(2,3,1)] = 0; image[sub2ind(3,1,1)] = 0; image[sub2ind(3,2,1)] = 2; image[sub2ind(3,3,1)] = 1; // create map thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); map[sub2ind(2,2,2)] = 0.0; // select this point for processing std::shared_ptr<cudautils::Sift> ni = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor(num_gpus, num_streams, ni); ni->setImage(thrust::raw_pointer_cast(image.data())); ni->setMap(thrust::raw_pointer_cast(map.data())); executor.run(); cudautils::Keypoint_store keystore; ni->getKeystore(&keystore); std::vector<int> correct_idx = { 1, 2, 3, 9, 10, 11, 19, 33, 34, 35, 50, 51, 59, 68, 69, 70, 71, 84, 85, 86, 87, 108, 109, 110, 111, 127, 129, 132, 133, 137, 140, 141, 145, 153, 161, 164, 165, 181, 189, 194, 195, 198, 199, 202, 203, 206, 207, 211, 219, 223, 226, 227, 230, 231, 238, 247, 255, 259, 261, 263, 267, 269, 271, 277, 291, 293, 295, 311, 322, 324, 326, 332, 334, 338, 340, 342, 350, 354, 356, 358, 387, 393, 397, 409, 411, 423, 437, 448, 449, 456, 458, 460, 466, 472, 474, 500, 514, 515, 523, 567, 579, 614, 615, 638 }; std::vector<int> correct_val = { 17, 49, 100, 17, 86, 109, 40, 17, 49, 100, 37, 77, 45, 9, 26, 44, 97, 9, 26, 44, 97, 9, 26, 44, 109, 22, 37, 17, 50, 17, 17, 50, 51, 36, 17, 17, 86, 35, 67, 33, 16, 50, 50, 33, 63, 50, 109, 42, 21, 16, 33, 16, 50, 50, 15, 107, 21, 49, 17, 84, 49, 17, 109, 35, 49, 17, 84, 22, 49, 17, 84, 43, 22, 49, 17, 109, 68, 49, 17, 84, 59, 29, 30, 14, 16, 40, 30, 8, 8, 8, 17, 16, 43, 28, 48, 16, 7, 16, 20, 20, 16, 41, 21, 23 }; for (int i=0; i < keystore.len; i++) { cudautils::Keypoint key = keystore.buf[i]; // printf("Keypoint:%d\n", i); for (int j=0; j < sift_params.descriptor_len; j++) { auto it = std::find(correct_idx.begin(), correct_idx.end(), j); if (it != correct_idx.end()) { int pos = it - correct_idx.begin(); // printf("\t%d: %d\n", j, (int) key.ivec[j]); ASSERT_EQ(correct_val[pos], (int) key.ivec[j]); } else { ASSERT_EQ(0, (int) key.ivec[j]); } } } free(fv_centers); } TEST_F(SiftTest, SiftSimple4Test) { const unsigned int x_size = 20; const unsigned int y_size = 20; const unsigned int z_size = 20; const unsigned int keypoint_num = 1; const unsigned int num_gpus = 2; const unsigned int num_streams = 1; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int y_sub_size = y_size / num_gpus; const unsigned int dx = x_sub_size; const unsigned int dy = y_sub_size; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); const unsigned int orihist_radius = static_cast<unsigned int>(sift_params.xyScale * 3.0); const 
unsigned int xyiradius = static_cast<unsigned int>(1.414 * sift_params.xyScale * sift_params.MagFactor * (sift_params.IndexSize + 1) / 2.0); const unsigned int dw = (num_gpus == 1 ? 0 : std::max(orihist_radius, xyiradius) + 1); cudautils::Sub2Ind sub2ind(20,20,20); // create img thrust::host_vector<double> image(image_size); image[sub2ind( 9, 9,10)] = 1; image[sub2ind( 9,10,10)] = 2; image[sub2ind( 9,11,10)] = 3; image[sub2ind(10, 9,10)] = 2; image[sub2ind(10,10,10)] = 5; // keypoint image[sub2ind(10,11,10)] = 1; image[sub2ind(11, 9,10)] = 1; image[sub2ind(11,10,10)] = 3; image[sub2ind(11,11,10)] = 2; image[sub2ind( 9, 9, 9)] = 0; image[sub2ind( 9,10, 9)] = 1; image[sub2ind( 9,11, 9)] = 2; image[sub2ind(10, 9, 9)] = 1; image[sub2ind(10,10, 9)] = 3; image[sub2ind(10,11, 9)] = 0; image[sub2ind(11, 9, 9)] = 0; image[sub2ind(11,10, 9)] = 2; image[sub2ind(11,11, 9)] = 1; // create map thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); map[sub2ind(10,10,10)] = 0.0; // select this point for processing std::shared_ptr<cudautils::Sift> ni = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor(num_gpus, num_streams, ni); ni->setImage(thrust::raw_pointer_cast(image.data())); ni->setMap(thrust::raw_pointer_cast(map.data())); executor.run(); cudautils::Keypoint_store keystore; ni->getKeystore(&keystore); std::vector<int> correct_idx = { 1, 2, 3, 9, 10, 11, 19, 33, 34, 35, 50, 51, 59, 68, 69, 70, 71, 84, 85, 86, 87, 108, 109, 110, 111, 127, 129, 132, 133, 137, 140, 141, 145, 153, 161, 164, 165, 181, 189, 194, 195, 198, 199, 202, 203, 206, 207, 211, 219, 223, 226, 227, 230, 231, 238, 247, 255, 259, 261, 263, 267, 269, 271, 277, 291, 293, 295, 311, 322, 324, 326, 332, 334, 338, 340, 342, 350, 354, 356, 358, 387, 393, 397, 409, 411, 423, 437, 448, 449, 456, 458, 460, 466, 472, 474, 500, 514, 515, 523, 567, 579, 614, 615, 638 }; std::vector<int> correct_val = { 11, 33, 66, 11, 82, 91, 53, 11, 33, 66, 49, 102, 59, 11, 35, 58, 113, 11, 35, 58, 113, 11, 35, 58, 113, 29, 38, 11, 33, 11, 11, 33, 68, 48, 11, 11, 80, 47, 88, 22, 11, 33, 33, 22, 72, 33, 113, 55, 28, 21, 22, 11, 33, 33, 19, 113, 28, 33, 11, 56, 33, 11, 113, 46, 33, 11, 56, 30, 33, 11, 55, 57, 29, 33, 11, 113, 89, 33, 11, 55, 78, 38, 40, 19, 21, 53, 40, 11, 11, 11, 23, 21, 58, 38, 64, 21, 10, 21, 27, 27, 21, 55, 28, 31 }; for (int i=0; i < keystore.len; i++) { cudautils::Keypoint key = keystore.buf[i]; // printf("Keypoint:%d\n", i); for (int j=0; j < sift_params.descriptor_len; j++) { auto it = std::find(correct_idx.begin(), correct_idx.end(), j); if (it != correct_idx.end()) { // printf("\t%d: %d\n", j, (int) key.ivec[j]); int pos = it - correct_idx.begin(); ASSERT_EQ(correct_val[pos], (int) key.ivec[j]); } else { ASSERT_EQ(0, (int) key.ivec[j]); } } } free(fv_centers); } TEST_F(SiftTest, SiftSimple5Test) { const unsigned int x_size = 100; const unsigned int y_size = 100; const unsigned int z_size = 100; const unsigned int keypoint_num = 4; unsigned int num_gpus = 1; unsigned int num_streams = 1; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int dx = x_sub_size; unsigned int y_sub_size = y_size / num_gpus; unsigned int dy = y_sub_size; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); const unsigned int orihist_radius = static_cast<unsigned 
int>(sift_params.xyScale * 3.0); const unsigned int xyiradius = static_cast<unsigned int>(1.414 * sift_params.xyScale * sift_params.MagFactor * (sift_params.IndexSize + 1) / 2.0); unsigned int dw = (num_gpus == 1 ? 0 : std::max(orihist_radius, xyiradius) + 1); cudautils::Sub2Ind sub2ind(100,100,100); // create img thrust::host_vector<double> image(image_size); thrust::generate(image.begin(), image.end(), rand); // create map thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); // select this point for processing map[sub2ind(50,50,50)] = 0.0; map[sub2ind(20,20,20)] = 0.0; map[sub2ind(30,70,30)] = 0.0; map[sub2ind(80,80,80)] = 0.0; // pattern 1 std::shared_ptr<cudautils::Sift> ni1 = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor1(num_gpus, num_streams, ni1); ni1->setImage(thrust::raw_pointer_cast(image.data())); ni1->setMap(thrust::raw_pointer_cast(map.data())); executor1.run(); cudautils::Keypoint_store keystore1; ni1->getKeystore(&keystore1); // pattern 2 num_gpus = 2; num_streams = 2; y_sub_size = y_size / num_gpus; dy = 20; dw = (num_gpus == 1 ? 0 : std::max(orihist_radius, xyiradius) + 1); std::shared_ptr<cudautils::Sift> ni2 = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor2(num_gpus, num_streams, ni2); ni2->setImage(thrust::raw_pointer_cast(image.data())); ni2->setMap(thrust::raw_pointer_cast(map.data())); executor2.run(); cudautils::Keypoint_store keystore2; ni2->getKeystore(&keystore2); ASSERT_EQ(keystore1.len, keystore2.len); for (int i=0; i < keystore1.len; i++) { logger_->info("key[{}]", i); cudautils::Keypoint key1 = keystore1.buf[i]; cudautils::Keypoint key2; bool is_found = false; for (int k=0; k < keystore2.len; k++) { key2 = keystore2.buf[k]; if (key1.x == key2.x && key1.y == key2.y && key1.z == key2.z) { is_found = true; break; } } if (! is_found) { FAIL(); } ASSERT_EQ(key1.xyScale, key2.xyScale); ASSERT_EQ(key1.tScale, key2.tScale); for (int j=0; j < sift_params.descriptor_len; j++) { // logger_->info("key[{}] ivec[{}] {}, {}", i, j, key1.ivec[j], key2.ivec[j]); ASSERT_EQ((int) key1.ivec[j], (int) key2.ivec[j]); } } free(fv_centers); } TEST_F(SiftTest, SiftSimple7Test) { int hw_num_gpus = cudautils::get_gpu_num(); if (hw_num_gpus < 3) { printf("*** WARNING: This test case requires more than 3 gpus.\n"); return; } const unsigned int x_size = 1024; const unsigned int y_size = 1024; const unsigned int z_size = 126; const unsigned int keypoint_num = 100; unsigned int num_gpus = 1; unsigned int num_streams = 1; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int dx = x_sub_size; unsigned int y_sub_size = y_size / num_gpus; unsigned int dy = y_sub_size; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); const unsigned int orihist_radius = static_cast<unsigned int>(sift_params.xyScale * 3.0); const unsigned int xyiradius = static_cast<unsigned int>(1.414 * sift_params.xyScale * sift_params.MagFactor * (sift_params.IndexSize + 1) / 2.0); unsigned int dw = (num_gpus == 1 ? 
0 : std::max(orihist_radius, xyiradius) + 1); cudautils::Sub2Ind sub2ind(x_size, y_size, z_size); // create img logger_->info("start to generate image"); thrust::host_vector<double> image(image_size); thrust::generate(image.begin(), image.end(), rand); logger_->info("end of image generation"); // create map logger_->info("start to generate map"); thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); logger_->info("end of map generation"); // select this point for processing logger_->info("start to generate keypoints"); thrust::minstd_rand rng; thrust::uniform_int_distribution<int> dist_x(0,x_size-1); thrust::uniform_int_distribution<int> dist_y(0,y_size-1); thrust::uniform_int_distribution<int> dist_z(0,z_size-1); for (int i = 0; i < keypoint_num; i++) { int key_x = dist_x(rng); int key_y = dist_y(rng); int key_z = dist_z(rng); map[sub2ind(key_x,key_y,key_z)] = 0.0; } logger_->info("end of keypoints generation"); // pattern 1 std::shared_ptr<cudautils::Sift> ni1 = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor1(num_gpus, num_streams, ni1); ni1->setImage(thrust::raw_pointer_cast(image.data())); ni1->setMap(thrust::raw_pointer_cast(map.data())); executor1.run(); cudautils::Keypoint_store keystore1; ni1->getKeystore(&keystore1); // pattern 2 num_gpus = 3; num_streams = 20; y_sub_size = (y_size + num_gpus - 1) / num_gpus; dy = 256; dw = (num_gpus == 1 ? 0 : std::max(orihist_radius, xyiradius) + 1); std::shared_ptr<cudautils::Sift> ni2 = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor2(num_gpus, num_streams, ni2); ni2->setImage(thrust::raw_pointer_cast(image.data())); ni2->setMap(thrust::raw_pointer_cast(map.data())); executor2.run(); cudautils::Keypoint_store keystore2; ni2->getKeystore(&keystore2); ASSERT_EQ(keystore1.len, keystore2.len); for (int i=0; i < keystore1.len; i++) { // logger_->info("key[{}]", i); cudautils::Keypoint key1 = keystore1.buf[i]; cudautils::Keypoint key2; bool is_found = false; for (int k=0; k < keystore2.len; k++) { key2 = keystore2.buf[k]; if (key1.x == key2.x && key1.y == key2.y && key1.z == key2.z) { is_found = true; break; } } if (! is_found) { FAIL(); } ASSERT_EQ(key1.xyScale, key2.xyScale); ASSERT_EQ(key1.tScale, key2.tScale); for (int j=0; j < sift_params.descriptor_len; j++) { // logger_->info("key[{}] ivec[{}] {}, {}", i, j, key1.ivec[j], key2.ivec[j]); ASSERT_EQ((int) key1.ivec[j], (int) key2.ivec[j]); } } free(fv_centers); } TEST_F(SiftTest, SiftSimple6Test) { const unsigned int x_size = 2000; const unsigned int y_size = 2000; const unsigned int z_size = 100; const unsigned int keypoint_num = 100; unsigned int num_gpus = 1; unsigned int num_streams = 1; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int dx = x_sub_size; unsigned int y_sub_size = y_size / num_gpus; unsigned int dy = y_sub_size; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); const unsigned int orihist_radius = static_cast<unsigned int>(sift_params.xyScale * 3.0); const unsigned int xyiradius = static_cast<unsigned int>(1.414 * sift_params.xyScale * sift_params.MagFactor * (sift_params.IndexSize + 1) / 2.0); unsigned int dw = (num_gpus == 1 ? 
0 : std::max(orihist_radius, xyiradius) + 1); cudautils::Sub2Ind sub2ind(x_size, y_size, z_size); // create img logger_->info("start to generate image"); thrust::host_vector<double> image(image_size); thrust::generate(image.begin(), image.end(), rand); logger_->info("end of image generation"); // create map logger_->info("start to generate map"); thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); logger_->info("end of map generation"); // select this point for processing // thrust::host_vector<int> key_x(keypoint_num); // thrust::host_vector<int> key_y(keypoint_num); // thrust::host_vector<int> key_z(keypoint_num); logger_->info("start to generate keypoints"); thrust::minstd_rand rng; thrust::uniform_int_distribution<int> dist_x(0,x_size-1); thrust::uniform_int_distribution<int> dist_y(0,y_size-1); thrust::uniform_int_distribution<int> dist_z(0,z_size-1); for (int i = 0; i < keypoint_num; i++) { int key_x = dist_x(rng); int key_y = dist_y(rng); int key_z = dist_z(rng); map[sub2ind(key_x,key_y,key_z)] = 0.0; } logger_->info("end of keypoints generation"); // pattern 1 std::shared_ptr<cudautils::Sift> ni1 = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor1(num_gpus, num_streams, ni1); ni1->setImage(thrust::raw_pointer_cast(image.data())); ni1->setMap(thrust::raw_pointer_cast(map.data())); executor1.run(); cudautils::Keypoint_store keystore1; ni1->getKeystore(&keystore1); // pattern 2 num_gpus = 2; num_streams = 2; y_sub_size = y_size / num_gpus; dy = 20; dw = (num_gpus == 1 ? 0 : std::max(orihist_radius, xyiradius) + 1); std::shared_ptr<cudautils::Sift> ni2 = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor2(num_gpus, num_streams, ni2); ni2->setImage(thrust::raw_pointer_cast(image.data())); ni2->setMap(thrust::raw_pointer_cast(map.data())); executor2.run(); cudautils::Keypoint_store keystore2; ni2->getKeystore(&keystore2); ASSERT_EQ(keystore1.len, keystore2.len); for (int i=0; i < keystore1.len; i++) { // logger_->info("key[{}]", i); cudautils::Keypoint key1 = keystore1.buf[i]; cudautils::Keypoint key2; bool is_found = false; for (int k=0; k < keystore2.len; k++) { key2 = keystore2.buf[k]; if (key1.x == key2.x && key1.y == key2.y && key1.z == key2.z) { is_found = true; break; } } if (! 
is_found) { FAIL(); } ASSERT_EQ(key1.xyScale, key2.xyScale); ASSERT_EQ(key1.tScale, key2.tScale); for (int j=0; j < sift_params.descriptor_len; j++) { // logger_->info("key[{}] ivec[{}] {}, {}", i, j, key1.ivec[j], key2.ivec[j]); ASSERT_EQ((int) key1.ivec[j], (int) key2.ivec[j]); } } free(fv_centers); } // Check output with saved binary TEST_F(SiftTest, CheckBinaryTest) { std::string in_image_filename1("test_img.bin"); std::string in_map_filename2("test_map.bin"); std::string out_filename("gtest_output.bin"); // Compares to test_output.bin file in tests/ unsigned int x_size, y_size, z_size, x_size1, y_size1, z_size1; std::ifstream fin1(in_image_filename1, std::ios::binary); if (fin1.is_open()) { fin1.read((char*)&x_size, sizeof(unsigned int)); fin1.read((char*)&y_size, sizeof(unsigned int)); fin1.read((char*)&z_size, sizeof(unsigned int)); } else { throw std::invalid_argument( "Unable to open or find file: `test_img.bin` in current directory"); } std::ifstream fin2(in_map_filename2, std::ios::binary); if (fin2.is_open()) { fin2.read((char*)&x_size1, sizeof(unsigned int)); fin2.read((char*)&y_size1, sizeof(unsigned int)); fin2.read((char*)&z_size1, sizeof(unsigned int)); } else { throw std::invalid_argument( "Unable to open or find file: `test_map.bin` in current directory"); } if (x_size != x_size1 || y_size != y_size1 || z_size != z_size1) { logger_->error("the dimension of image and map is not the same. image({},{},{}), map({},{},{})", x_size, y_size, z_size, x_size1, y_size1, z_size1); fin1.close(); fin2.close(); /*ASSERT_TRUE(false);*/ throw std::invalid_argument("Dimension of image and map is not the same"); } std::vector<double> in_image(x_size * y_size * z_size); std::vector<int8_t> in_map (x_size * y_size * z_size); fin1.read((char*)in_image.data(), x_size * y_size * z_size * sizeof(double)); fin2.read((char*)in_map.data(), x_size * y_size * z_size * sizeof(int8_t)); fin1.close(); fin2.close(); cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, 0); int stream_num = 20; int x_substream_stride = 256; int y_substream_stride = 256; int num_gpus = cudautils::get_gpu_num(); logger_->info("# of gpus = {}", num_gpus); logger_->info("# of streams = {}", stream_num); const unsigned int x_sub_size = x_size; const unsigned int y_sub_size = y_size / num_gpus; const unsigned int dw = 0; const unsigned int dx = min(x_substream_stride, x_sub_size); const unsigned int dy = min(y_substream_stride, y_sub_size); std::shared_ptr<cudautils::Sift> ni = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, stream_num, sift_params, fv_centers); cudautils::CudaTaskExecutor executor(num_gpus, stream_num, ni); ni->setImage(thrust::raw_pointer_cast(in_image.data())); ni->setMap(thrust::raw_pointer_cast(in_map.data())); executor.run(); cudautils::Keypoint_store keystore; ni->getKeystore(&keystore); FILE* pFile = fopen(out_filename.c_str(), "w"); if (pFile != NULL) { // print keystore for (int i=0; i < keystore.len; i++) { cudautils::Keypoint key = keystore.buf[i]; fprintf(pFile, "Keypoint:%d\n", i); for (int j=0; j < sift_params.descriptor_len; j++) { fprintf(pFile, "\t%d: %d\n", j, (int) key.ivec[j]); } } fclose(pFile); } else { throw std::invalid_argument( "Unable to open output file\nMake sure `test_output.bin` is in current dir: tests/"); } ASSERT_EQ(system("diff test_output.bin gtest_output.bin"), 0); free(fv_centers); } // Disabled to speed up quick test runs TEST_F(SiftTest, SiftFullImageTest) { 
const unsigned int x_size = 1024; const unsigned int y_size = 1024; const unsigned int z_size = 126; const unsigned int keypoint_num = 1; const unsigned int num_gpus = 2; const unsigned int num_streams = 20; long image_size = x_size * y_size * z_size; const unsigned int x_sub_size = x_size; const unsigned int y_sub_size = y_size / num_gpus; const unsigned int dx = 256; const unsigned int dy = 256; const unsigned int dw = 0; cudautils::SiftParams sift_params; double* fv_centers = sift_defaults(&sift_params, x_size, y_size, z_size, keypoint_num); // create img thrust::host_vector<double> image(image_size); thrust::generate(image.begin(), image.end(), rand); // create map thrust::host_vector<int8_t> map(image_size); thrust::fill_n(map.begin(), image_size, 1.0); long long idx; for (int i=0; i < keypoint_num; i++) { // warning not evenly distributed across the image // chosen to roughly distribute across GPUs idx = (x_size * rand()) % image_size; map[idx] = 0.0; // select this point for processing } std::shared_ptr<cudautils::Sift> ni = std::make_shared<cudautils::Sift>(x_size, y_size, z_size, x_sub_size, y_sub_size, dx, dy, dw, num_gpus, num_streams, sift_params, fv_centers); cudautils::CudaTaskExecutor executor(num_gpus, num_streams, ni); ni->setImage(thrust::raw_pointer_cast(image.data())); ni->setMap(thrust::raw_pointer_cast(map.data())); executor.run(); cudautils::Keypoint_store keystore; ni->getKeystore(&keystore); free(fv_centers); } }
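// The TEST_F cases above need a GoogleTest entry point supplied at link time.
// A minimal sketch of one is given below, assuming the build does not already
// link against gtest_main (if it does, this definition must be omitted).
int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);  // consume --gtest_* command-line flags
    return RUN_ALL_TESTS();                  // run every registered test case above
}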
25ae6a52e7b34c8c397f4b83b0c9bd9ea02d65ce.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <chrono> #include <hip/hip_runtime.h> static void CheckError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); } } #define CHECK_ERROR( err ) (CheckError( err, __FILE__, __LINE__ )) #define BLOCK_SIZE 256 template <typename T> __global__ void BlockRangeAtomicOnGlobalMem(T* data, int n) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for ( unsigned int i = tid; i < n; i += blockDim.x*gridDim.x){ atomicAdd(data+threadIdx.x, (T)1); //arbitrary number to add } } template <typename T> __global__ void WarpRangeAtomicOnGlobalMem(T* data, int n) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for ( unsigned int i = tid; i < n; i += blockDim.x*gridDim.x){ atomicAdd(data+(i & 0x1F), (T)1); //arbitrary number to add } } template <typename T> __global__ void SingleRangeAtomicOnGlobalMem(T* data, int offset, int n) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for ( unsigned int i = tid; i < n; i += blockDim.x*gridDim.x){ atomicAdd(data+offset, (T)1); //arbitrary number to add } } template <typename T> __global__ void BlockRangeAtomicOnSharedMem(T* data, int n) { __shared__ T smem_data[BLOCK_SIZE]; unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for ( unsigned int i = tid; i < n; i += blockDim.x*gridDim.x){ atomicAdd(smem_data+threadIdx.x, (T)1); } if (blockIdx.x == gridDim.x) data[threadIdx.x] = smem_data[threadIdx.x]; } template <typename T> __global__ void WarpRangeAtomicOnSharedMem(T* data, int n) { __shared__ T smem_data[32]; unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for ( unsigned int i = tid; i < n; i += blockDim.x*gridDim.x){ atomicAdd(smem_data+(i & 0x1F), (T)1); } if (blockIdx.x == gridDim.x && threadIdx.x < 0x1F) data[threadIdx.x] = smem_data[threadIdx.x]; } template <typename T> __global__ void SingleRangeAtomicOnSharedMem(T* data, int offset, int n) { __shared__ T smem_data[BLOCK_SIZE]; unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for ( unsigned int i = tid; i < n; i += blockDim.x*gridDim.x){ atomicAdd(smem_data + offset, (T)1); } if (blockIdx.x == gridDim.x && threadIdx.x == 0) data[threadIdx.x] = smem_data[threadIdx.x]; } template <typename T> void atomicPerf (int n, int t, int repeat) { size_t data_size = sizeof(T) * t; T* data = (T*) malloc (data_size); for(int i=0; i<t; i++) { data[i] = i%1024+1; } T* d_data; CHECK_ERROR( hipMalloc((void **)&d_data, data_size) ); dim3 block (BLOCK_SIZE); dim3 grid (n / BLOCK_SIZE); CHECK_ERROR( hipMemcpy(d_data, data, data_size, hipMemcpyHostToDevice) ); CHECK_ERROR( hipDeviceSynchronize() ); auto start = std::chrono::steady_clock::now(); for(int i=0; i<repeat; i++) { hipLaunchKernelGGL(( BlockRangeAtomicOnGlobalMem<T>), dim3(grid), dim3(block), 0, 0, d_data, n); } CHECK_ERROR( hipDeviceSynchronize() ); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of BlockRangeAtomicOnGlobalMem: %f (us)\n", time * 1e-3f / repeat); CHECK_ERROR( hipMemcpy(d_data, data, data_size, hipMemcpyHostToDevice) ); CHECK_ERROR( hipDeviceSynchronize() ); start = std::chrono::steady_clock::now(); for(int i=0; i<repeat; i++) { hipLaunchKernelGGL(( WarpRangeAtomicOnGlobalMem<T>), dim3(grid), dim3(block), 0, 0, d_data, n); } CHECK_ERROR( hipDeviceSynchronize() ); end = 
std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of WarpRangeAtomicOnGlobalMem: %f (us)\n", time * 1e-3f / repeat); CHECK_ERROR( hipMemcpy(d_data, data, data_size, hipMemcpyHostToDevice) ); CHECK_ERROR( hipDeviceSynchronize() ); start = std::chrono::steady_clock::now(); for(int i=0; i<repeat; i++) { hipLaunchKernelGGL(( SingleRangeAtomicOnGlobalMem<T>), dim3(grid), dim3(block), 0, 0, d_data, i % BLOCK_SIZE, n); } CHECK_ERROR( hipDeviceSynchronize() ); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of SingleRangeAtomicOnGlobalMem: %f (us)\n", time * 1e-3f / repeat); CHECK_ERROR( hipMemcpy(d_data, data, data_size, hipMemcpyHostToDevice) ); CHECK_ERROR( hipDeviceSynchronize() ); start = std::chrono::steady_clock::now(); for(int i=0; i<repeat; i++) { hipLaunchKernelGGL(( BlockRangeAtomicOnSharedMem<T>), dim3(grid), dim3(block), 0, 0, d_data, n); } CHECK_ERROR( hipDeviceSynchronize() ); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of BlockRangeAtomicOnSharedMem: %f (us)\n", time * 1e-3f / repeat); CHECK_ERROR( hipMemcpy(d_data, data, data_size, hipMemcpyHostToDevice) ); CHECK_ERROR( hipDeviceSynchronize() ); start = std::chrono::steady_clock::now(); for(int i=0; i<repeat; i++) { hipLaunchKernelGGL(( WarpRangeAtomicOnSharedMem<T>), dim3(grid), dim3(block), 0, 0, d_data, n); } CHECK_ERROR( hipDeviceSynchronize() ); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of WarpRangeAtomicOnSharedMem: %f (us)\n", time * 1e-3f / repeat); CHECK_ERROR( hipMemcpy(d_data, data, data_size, hipMemcpyHostToDevice) ); CHECK_ERROR( hipDeviceSynchronize() ); start = std::chrono::steady_clock::now(); for(int i=0; i<repeat; i++) { hipLaunchKernelGGL(( SingleRangeAtomicOnSharedMem<T>), dim3(grid), dim3(block), 0, 0, d_data, i % BLOCK_SIZE, n); } CHECK_ERROR( hipDeviceSynchronize() ); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of SingleRangeAtomicOnSharedMem: %f (us)\n", time * 1e-3f / repeat); free(data); hipFree(d_data); } int main(int argc, char* argv[]) { if (argc != 2) { printf("Usage: %s <repeat>\n", argv[0]); return 1; } const int repeat = atoi(argv[1]); const int n = 3*4*7*8*9*256; // number of threads const int len = 1024; // data array length printf("\nFP64 atomic add\n"); atomicPerf<double>(n, len, repeat); printf("\nINT32 atomic add\n"); atomicPerf<int>(n, len, repeat); printf("\nFP32 atomic add\n"); atomicPerf<float>(n, len, repeat); return 0; }
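// The three kernel families above differ only in how many distinct words the
// atomicAdd traffic lands on: BlockRange* spreads it over BLOCK_SIZE words,
// WarpRange* over 32 words, and SingleRange* serializes everything on a single
// word. The helper below is illustrative only (it is not called by the
// benchmark); it simply spells out the per-word increment counts implied by
// those index expressions for one kernel launch of n total iterations.
void print_expected_increments_per_launch(int n)
{
  printf("block-range : %d adds over %d words -> %d adds per word\n",
         n, BLOCK_SIZE, n / BLOCK_SIZE);
  printf("warp-range  : %d adds over %d words -> %d adds per word\n",
         n, 32, n / 32);
  printf("single-range: %d adds on a single word\n", n);
}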
25ae6a52e7b34c8c397f4b83b0c9bd9ea02d65ce.cu
#include <stdio.h> #include <stdlib.h> #include <chrono> #include <cuda.h> static void CheckError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); } } #define CHECK_ERROR( err ) (CheckError( err, __FILE__, __LINE__ )) #define BLOCK_SIZE 256 template <typename T> __global__ void BlockRangeAtomicOnGlobalMem(T* data, int n) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for ( unsigned int i = tid; i < n; i += blockDim.x*gridDim.x){ atomicAdd(data+threadIdx.x, (T)1); //arbitrary number to add } } template <typename T> __global__ void WarpRangeAtomicOnGlobalMem(T* data, int n) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for ( unsigned int i = tid; i < n; i += blockDim.x*gridDim.x){ atomicAdd(data+(i & 0x1F), (T)1); //arbitrary number to add } } template <typename T> __global__ void SingleRangeAtomicOnGlobalMem(T* data, int offset, int n) { unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for ( unsigned int i = tid; i < n; i += blockDim.x*gridDim.x){ atomicAdd(data+offset, (T)1); //arbitrary number to add } } template <typename T> __global__ void BlockRangeAtomicOnSharedMem(T* data, int n) { __shared__ T smem_data[BLOCK_SIZE]; unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for ( unsigned int i = tid; i < n; i += blockDim.x*gridDim.x){ atomicAdd(smem_data+threadIdx.x, (T)1); } if (blockIdx.x == gridDim.x) data[threadIdx.x] = smem_data[threadIdx.x]; } template <typename T> __global__ void WarpRangeAtomicOnSharedMem(T* data, int n) { __shared__ T smem_data[32]; unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for ( unsigned int i = tid; i < n; i += blockDim.x*gridDim.x){ atomicAdd(smem_data+(i & 0x1F), (T)1); } if (blockIdx.x == gridDim.x && threadIdx.x < 0x1F) data[threadIdx.x] = smem_data[threadIdx.x]; } template <typename T> __global__ void SingleRangeAtomicOnSharedMem(T* data, int offset, int n) { __shared__ T smem_data[BLOCK_SIZE]; unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x; for ( unsigned int i = tid; i < n; i += blockDim.x*gridDim.x){ atomicAdd(smem_data + offset, (T)1); } if (blockIdx.x == gridDim.x && threadIdx.x == 0) data[threadIdx.x] = smem_data[threadIdx.x]; } template <typename T> void atomicPerf (int n, int t, int repeat) { size_t data_size = sizeof(T) * t; T* data = (T*) malloc (data_size); for(int i=0; i<t; i++) { data[i] = i%1024+1; } T* d_data; CHECK_ERROR( cudaMalloc((void **)&d_data, data_size) ); dim3 block (BLOCK_SIZE); dim3 grid (n / BLOCK_SIZE); CHECK_ERROR( cudaMemcpy(d_data, data, data_size, cudaMemcpyHostToDevice) ); CHECK_ERROR( cudaDeviceSynchronize() ); auto start = std::chrono::steady_clock::now(); for(int i=0; i<repeat; i++) { BlockRangeAtomicOnGlobalMem<T><<<grid, block>>>(d_data, n); } CHECK_ERROR( cudaDeviceSynchronize() ); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of BlockRangeAtomicOnGlobalMem: %f (us)\n", time * 1e-3f / repeat); CHECK_ERROR( cudaMemcpy(d_data, data, data_size, cudaMemcpyHostToDevice) ); CHECK_ERROR( cudaDeviceSynchronize() ); start = std::chrono::steady_clock::now(); for(int i=0; i<repeat; i++) { WarpRangeAtomicOnGlobalMem<T><<<grid, block>>>(d_data, n); } CHECK_ERROR( cudaDeviceSynchronize() ); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution 
time of WarpRangeAtomicOnGlobalMem: %f (us)\n", time * 1e-3f / repeat); CHECK_ERROR( cudaMemcpy(d_data, data, data_size, cudaMemcpyHostToDevice) ); CHECK_ERROR( cudaDeviceSynchronize() ); start = std::chrono::steady_clock::now(); for(int i=0; i<repeat; i++) { SingleRangeAtomicOnGlobalMem<T><<<grid, block>>>(d_data, i % BLOCK_SIZE, n); } CHECK_ERROR( cudaDeviceSynchronize() ); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of SingleRangeAtomicOnGlobalMem: %f (us)\n", time * 1e-3f / repeat); CHECK_ERROR( cudaMemcpy(d_data, data, data_size, cudaMemcpyHostToDevice) ); CHECK_ERROR( cudaDeviceSynchronize() ); start = std::chrono::steady_clock::now(); for(int i=0; i<repeat; i++) { BlockRangeAtomicOnSharedMem<T><<<grid, block>>>(d_data, n); } CHECK_ERROR( cudaDeviceSynchronize() ); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of BlockRangeAtomicOnSharedMem: %f (us)\n", time * 1e-3f / repeat); CHECK_ERROR( cudaMemcpy(d_data, data, data_size, cudaMemcpyHostToDevice) ); CHECK_ERROR( cudaDeviceSynchronize() ); start = std::chrono::steady_clock::now(); for(int i=0; i<repeat; i++) { WarpRangeAtomicOnSharedMem<T><<<grid, block>>>(d_data, n); } CHECK_ERROR( cudaDeviceSynchronize() ); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of WarpRangeAtomicOnSharedMem: %f (us)\n", time * 1e-3f / repeat); CHECK_ERROR( cudaMemcpy(d_data, data, data_size, cudaMemcpyHostToDevice) ); CHECK_ERROR( cudaDeviceSynchronize() ); start = std::chrono::steady_clock::now(); for(int i=0; i<repeat; i++) { SingleRangeAtomicOnSharedMem<T><<<grid, block>>>(d_data, i % BLOCK_SIZE, n); } CHECK_ERROR( cudaDeviceSynchronize() ); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of SingleRangeAtomicOnSharedMem: %f (us)\n", time * 1e-3f / repeat); free(data); cudaFree(d_data); } int main(int argc, char* argv[]) { if (argc != 2) { printf("Usage: %s <repeat>\n", argv[0]); return 1; } const int repeat = atoi(argv[1]); const int n = 3*4*7*8*9*256; // number of threads const int len = 1024; // data array length printf("\nFP64 atomic add\n"); atomicPerf<double>(n, len, repeat); printf("\nINT32 atomic add\n"); atomicPerf<int>(n, len, repeat); printf("\nFP32 atomic add\n"); atomicPerf<float>(n, len, repeat); return 0; }
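// One possible way to build and run this CUDA variant; the compiler flags and
// the repeat count are illustrative choices, not requirements of the code:
//
//   nvcc -O3 25ae6a52e7b34c8c397f4b83b0c9bd9ea02d65ce.cu -o atomic_perf
//   ./atomic_perf 100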
bbd1cf9287ff73b132d91a8bc0dcf000165b8c41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/ceil_div.h> #include <ATen/Dispatch.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <ATen/native/hip/LaunchUtils.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/_adaptive_avg_pool2d_backward_native.h> #include <ATen/ops/_adaptive_avg_pool2d_native.h> #include <ATen/ops/empty.h> #include <ATen/ops/zeros_like.h> #endif #include <algorithm> #include <cfloat> #include <cmath> #define START_IND(a,b,c) (int)::floor((float)(a * c) / b) #define END_IND(a,b,c) (int)::ceil((float)((a + 1) * c) / b) #define START_IND_INT(a,b,c) ((a * c) / b) #define END_IND_INT(a,b,c) (((a + 1) * c + b - 1) / b) // #define START_IND(a,b,c) a * c / b // #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0 #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit #define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched namespace at { namespace native { namespace { // 4d tensor B x D x H x W // All kernels view batch dim B and feature dim D as collapsed. /* * Description: * this function adaptively average pools an input 4D tensor along dimensions 2 and 3 * 4D input, 4D output */ template <typename T> __global__ void adaptive_average_pool(T *input, T *output, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators on output pixels int oh, ow; // select input/output plane based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; output = output + o_plane*osizeH*osizeW; input = input + i_plane*istrideD; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y*gridDim.y; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; // For all output pixels... 
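// (Each output row oh averages input rows [START_IND(oh,osizeH,isizeH),
//  END_IND(oh,osizeH,isizeH)); adjacent windows overlap when osizeH does not
//  divide isizeH. For example, with isizeH = 5 and osizeH = 3 the three output
//  rows cover input rows {0,1}, {1,2,3} and {3,4} respectively.)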
for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = START_IND(oh, osizeH, isizeH); int iendH = END_IND(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = START_IND(ow, osizeW, isizeW); int iendW = END_IND(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the average pooling over corresponding input pixels T *ptr_input = input + istartH*istrideH + istartW*istrideW; T *ptr_output = output + oh*osizeW + ow; T sum = static_cast<T>(0); int ih, iw; for(ih = 0; ih < kH; ++ih) { for(iw = 0; iw < kW; ++iw) { T val = ptr_input[iw*istrideW]; sum += val; } ptr_input += istrideH; // next input line } // Update output *ptr_output = sum / kH / kW; } } } /* * Description: * this function computes the gradInput from gradOutput */ template <typename T> __global__ void adaptive_average_gradinput( T *gradInput, T *gradOutput, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators on input pixels int ih, iw; // select input/output plane based on thread/block ID int i_plane = blockIdx.x; int o_plane = i_plane; gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; int istartH = blockDim.y*blockIdx.y + threadIdx.y; int iendH = isizeH; int istepH = blockDim.y*gridDim.y; int istartW = threadIdx.x; int iendW = isizeW; int istepW = blockDim.x; // compute gradInput for(ih = istartH; ih < iendH; ih += istepH) { int ostartH = START_IND(ih, isizeH, osizeH); int oendH = END_IND(ih, isizeH, osizeH); for(iw = istartW; iw < iendW; iw += istepW) { int ostartW = START_IND(iw, isizeW, osizeW); int oendW = END_IND(iw, isizeW, osizeW); // Compute the gradients over corresponding output pixels T *ptr_gradInput = gradInput + ih*isizeW + iw; int oh, ow; for(oh = ostartH; oh < oendH; ++oh) { int kH = START_IND(oh, osizeH, isizeH) - END_IND(oh, osizeH, isizeH); for(ow = ostartW; ow < oendW; ++ow) { int kW = START_IND(ow, osizeW, isizeW) - END_IND(ow, osizeW, isizeW); T grad_delta = gradOutput[ow + oh*osizeW] / kH / kW; *ptr_gradInput += grad_delta; } } } } } /* * Description: * this function computes the gradInput from gradOutput * (uses atomic add) */ template <typename T> __global__ void atomic_adaptive_average_gradinput( T *gradInput, T *gradOutput, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators on output indices int oh, ow; // select input/output plane based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; gradOutput = gradOutput + o_plane*osizeW*osizeH; gradInput = gradInput + i_plane*isizeW*isizeH; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // For all output pixels... 
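// (Unlike adaptive_average_gradinput above, which walks input pixels so each
//  gradInput element is owned by exactly one thread, this kernel walks output
//  pixels; two output windows that share input pixels would race on the same
//  gradInput element, which is why gpuAtomicAddNoReturn is used below.)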
for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = START_IND(oh, osizeH, isizeH); int iendH = END_IND(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = START_IND(ow, osizeW, isizeW); int iendW = END_IND(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the gradients for over corresponding input pixels T *ptr_gradInput = gradInput + istartH*isizeW + istartW; T *ptr_gradOutput = gradOutput + oh*osizeW + ow; T grad_delta = *ptr_gradOutput / kW / kH; int ih, iw; for(ih = 0; ih < kH; ++ih) { for(iw = 0; iw < kW; ++iw) { // atomic add since different threads could update same variable gpuAtomicAddNoReturn(&(ptr_gradInput[iw]), grad_delta); } ptr_gradInput += isizeW; // next input line } } } } /* * Description: * this function adaptively average pools an input 4D tensor along dimensions 2 and 3 * NHWC layout for both input and output tensor * 4D input, 4D output */ template <typename index_t, typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void adaptive_average_pool_nhwc(const scalar_t* __restrict__ input, scalar_t* __restrict__ output, int sizeB, int sizeC, int isizeH, int isizeW, int osizeH, int osizeW, int kernel_stride_C, int kernel_size_C, index_t istrideB, index_t istrideC, index_t istrideH, index_t istrideW) { extern __shared__ int smem[]; scalar_t *out_cached = reinterpret_cast<scalar_t*>(smem); // flattening cta for pre-computation & smem initialization; int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; // use shared memory to store temporary output value. This is simply to // reduce register usage. for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = scalar_t(0.0); } __syncthreads(); // each CTA handles a portion of a single slice on batch dimension; int batch_id = blockIdx.x % sizeB; int channel_id = blockIdx.x / sizeB; int channel_offset = threadIdx.x + channel_id * blockDim.x; // each CTA handles a single slice on batch dimension; // We use gridDim.x to handle striding on C as well. output = output + batch_id * osizeH * osizeW * sizeC; input = input + batch_id * istrideB; // split out_cached and exclusively it assigned to each thread; out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C * blockDim.x]; // iterate on output H & W. // Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on // tile so there's a better chance to hit L1 cache. index_t oH = (osizeH + gridDim.z-1) / gridDim.z; index_t oW = (osizeW + gridDim.y-1) / gridDim.y; index_t ostartH = threadIdx.z + blockIdx.z*oH; index_t oendH = ::min(ostartH+oH, osizeH); index_t ostartW = threadIdx.y + blockIdx.y*oW; index_t oendW = ::min(ostartW+oW, osizeW); // Stride for threads, each warp can reuse L1 as they go. So theoretically // better chance to survive cache eviction. 
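// (In this NHWC kernel threadIdx.x walks the channel dimension, so consecutive
//  threads write consecutive channels of the packed output, and each thread's
//  partial sums live in the out_cached shared-memory scratch of
//  kernel_size_C * blockDim.x * blockDim.y * blockDim.z elements.)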
for (int oh = ostartH; oh < oendH; oh+=blockDim.z) { int istartH = START_IND_INT(oh, osizeH, isizeH); int iendH = END_IND_INT(oh, osizeH, isizeH); for (int ow = ostartW; ow < oendW; ow+=blockDim.y) { int istartW = START_IND_INT(ow, osizeW, isizeW); int iendW = END_IND_INT(ow, osizeW, isizeW); scalar_t factor = scalar_t(1.0) / ((iendH-istartH) * (iendW-istartW)); // loop on input: hierarchy h->w->c, use shared memory here hopefully // would not stall global memory read; for (index_t ih = istartH; ih < iendH; ih++) { for (index_t iw = istartW; iw < iendW; iw++) { int cached_index = threadIdx.x; const scalar_t *ptr_input = input + ih*istrideH + iw*istrideW; for (index_t c = channel_offset; c < sizeC; c += blockDim.x*kernel_stride_C) { out_cached[cached_index] += ptr_input[c*istrideC]; cached_index += blockDim.x; } } } scalar_t *ptr_output = output + (oh * osizeW + ow) * sizeC; int cached_index = threadIdx.x; // write accumulated output to global memory; for (index_t c = channel_offset; c < sizeC; c += blockDim.x*kernel_stride_C) { // This causes numerical issueptr when unit test with NCHW kernel; // switch to could verify the correctness; // output[c] = out_cached[c] / (iendH-istartH) / (iendW-istartW); ptr_output[c] = out_cached[cached_index] * factor; out_cached[cached_index] = scalar_t(0.0); cached_index += blockDim.x; } // no need to __syncthreads() since out_cached is not shared. } } } /* * Description: * this function computes the gradInput from gradOutput * NHWC layout for both input and output tensor * 4D input, 4D output */ template <typename index_t, typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void adaptive_average_gradinput_nhwc(scalar_t* __restrict__ gradInput, const scalar_t* __restrict__ gradOutput, int sizeB, int sizeC, int isizeH, int isizeW, int osizeH, int osizeW, int kernel_stride_C, int kernel_size_C, index_t ostrideB, index_t ostrideC, index_t ostrideH, index_t ostrideW) { extern __shared__ int smem[]; index_t *ostartW_cached = smem; index_t *oendW_cached = &ostartW_cached[isizeW]; // be careful with alignment, in case scalar_t is fp16, we want to assign // int pointers first. scalar_t *r_kW_cached = reinterpret_cast<scalar_t*>(&oendW_cached[isizeW]); scalar_t *r_kH_cached = &r_kW_cached[osizeW]; scalar_t *out_cached = &r_kH_cached[osizeH]; // flattening cta for pre-computation & smem initialization; int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; // Precompute output start/end index per input index on width dimension; // Not doing this for height dimension, as that's our out-most loop. for (index_t i = thread_id; i < isizeW; i+= block_size) { ostartW_cached[i] = START_IND_INT(i, isizeW, osizeW); oendW_cached[i] = END_IND_INT(i, isizeW, osizeW); } // Precompute pooling height/weight factor for each output element; // This is used to weight output gradient when accumulate them on input // gradient. // Technically we don't have to compute it for the whole `osizeH`, since // each cta only covers a consecutive portion of the entire output. But it's // not going to save us from code divergence, and shared memory save is not // an issue neither, so just leave it as is for now. 
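// (r_kH_cached[oh] and r_kW_cached[ow] hold 1 / pooling-window-extent along
//  each axis; the gradient of one output element is scattered to its input
//  window scaled by r_kH_cached[oh] * r_kW_cached[ow], matching the 1/kH/kW
//  averaging performed in the forward pass.)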
for (index_t i = thread_id; i < osizeH; i+= block_size) { r_kH_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeH, isizeH) - START_IND_INT(i, osizeH, isizeH)); } for (index_t i = thread_id; i < osizeW; i+= block_size) { r_kW_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeW, isizeW) - START_IND_INT(i, osizeW, isizeW)); } // each CTA handles a portion of a single slice on batch dimension; int batch_id = blockIdx.x % sizeB; int channel_id = blockIdx.x / sizeB; int channel_offset = threadIdx.x + channel_id * blockDim.x; // use shared memory to store temporary output value. This is simply to // reduce register usage. for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = scalar_t(0.0); } __syncthreads(); // each CTA handles a portion of a single slice on batch dimension; // We use gridDim.x to handle striding on C as well. gradInput = gradInput + batch_id * isizeH * isizeW * sizeC; gradOutput = gradOutput + batch_id * ostrideB; // split out_cached and exclusively it assigned to each thread; out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x * kernel_size_C]; // iterate on input H & W. // Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on // tile so there's a better chance to hit L1 cache. index_t iH = (isizeH + gridDim.z-1) / gridDim.z; index_t iW = (isizeW + gridDim.y-1) / gridDim.y; index_t istartH = threadIdx.z + blockIdx.z*iH; index_t iendH = ::min(istartH+iH, isizeH); index_t istartW = threadIdx.y + blockIdx.y*iW; index_t iendW = ::min(istartW+iW, isizeW); // Stride for threads, each warp can reuse L1 as they go. So theoretically // better chance to survive cache eviction. for (index_t ih = istartH; ih < iendH; ih+=blockDim.z) { index_t ostartH = START_IND_INT(ih, isizeH, osizeH); index_t oendH = END_IND_INT(ih, isizeH, osizeH); for (index_t iw = istartW; iw < iendW; iw+=blockDim.y) { // loop on output: hierarchy h->w->c, so we could reuse weight factor f // because it remains the same for given oh & ow for(index_t oh = ostartH; oh < oendH; ++oh) { for(index_t ow = ostartW_cached[iw]; ow < oendW_cached[iw]; ++ow) { scalar_t f = r_kW_cached[ow] * r_kH_cached[oh]; const scalar_t* ptr_gradOutput = gradOutput + oh*ostrideH + ow*ostrideW; int cached_index = threadIdx.x; for (index_t c = channel_offset; c < sizeC; c += blockDim.x*kernel_stride_C) { out_cached[cached_index] += ptr_gradOutput[c*ostrideC] * f; cached_index += blockDim.x; } } } scalar_t *ptr_gradInput = gradInput + (ih * isizeW + iw) * sizeC; int cached_index = threadIdx.x; // write accumulated gradIput to global memory; for (index_t c = channel_offset; c < sizeC; c += blockDim.x*kernel_stride_C) { ptr_gradInput[c] = out_cached[cached_index]; out_cached[cached_index] = scalar_t(0.0); cached_index += blockDim.x; } // no need to __syncthreads() since out_cached is not shared. 
} } } // 4d tensor B x D x H x W void adaptive_avg_pool2d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef output_size) { TensorArg input_arg{ input, "input", 1 }, output_arg{ output, "output", 2 }; checkAllSameGPU(__func__, {input_arg, output_arg}); for (int64_t i = 1; i < input.ndimension(); i++) { TORCH_CHECK(input.size(i) > 0, "adaptive_avg_pool2d(): Expected input to have non-zero size for non-batch dimensions, " "but input has sizes ", input.sizes(), " with dimension ", i, " being " "empty"); } Tensor input_ = input; switch (input.suggest_memory_format()) { case at::MemoryFormat::ChannelsLast: { // special case for tensor memory format in channels_last TORCH_CHECK(input.ndimension() == 4, "adaptive_avg_pool2d(): Expected 4D tensor, but got ", input.sizes()); int sizeB = input_.size(0); int sizeC = input_.size(1); int isizeH = input_.size(2); int isizeW = input_.size(3); int64_t istrideB = input_.stride(0); int64_t istrideC = input_.stride(1); int64_t istrideH = input_.stride(2); int64_t istrideW = input_.stride(3); int osizeH = output_size[0]; int osizeW = output_size[1]; // preserve channels_last stride on output tensor; if (!output.is_contiguous(at::MemoryFormat::ChannelsLast)) { // TODO: modify this after resize_ added `memory_format` tag output.resize_({sizeB, sizeC, osizeH, osizeW}).as_strided_({sizeB, sizeC, osizeH, osizeW}, {sizeC*osizeH*osizeW, 1, osizeW*sizeC, sizeC}); } if (output.numel() == 0) { return; } const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize; size_t sharedMemPerBlock = at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock; // Launch kernel on output tensor elements. Logic behind launch config: // output tensor size NCHW, strides NHWC; // Launch on: // N -> grid.x // H -> grid.z * block.z // W -> grid.y * block.y // C -> block.x // encourage larger block_y & block_z for better cache hit while maintain // reasonable block_x for coalesced memory access; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(osizeW), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(osizeH), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = ceil_div(sizeC, block_x * 4); int kernel_size_C = ceil_div(sizeC, block_x * kernel_stride_C); // Do NOT clip grid_x, striding on Batch dimension is not in the kernel, // although it could be easily implemented given current kernel. int grid_x = sizeB*kernel_stride_C; // it's OK to clip grid_y & grid_z, as we block the two dimensions in the kernel; int grid_y = std::min<int>( maxGridSize[1], ceil_div(osizeW, block_y*BLOCK_STRIDE)); int grid_z = std::min<int>( maxGridSize[2], ceil_div(osizeH, block_z*BLOCK_STRIDE)); const dim3 grid(grid_x, grid_y, grid_z); // we are dealing with packed tensor here. max index is the same as numel. 
// TODO: to really support input tensor large enought to go beyond int32, // we will need to restrict out shared memory usage and adjust the launch // config; AT_ASSERT(input_.numel() < std::numeric_limits<int32_t>::max()); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input_.scalar_type(), "adaptive_avg_pool2d_nhwc_cuda", [&] { size_t shmem_size = (kernel_size_C * block_x * block_y * block_z) * sizeof(scalar_t); AT_ASSERT(shmem_size <= sharedMemPerBlock); hipLaunchKernelGGL(( adaptive_average_pool_nhwc<int32_t>), dim3(grid), dim3(block), shmem_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), sizeB, sizeC, isizeH, isizeW, osizeH, osizeW, kernel_stride_C, kernel_size_C, istrideB, istrideC, istrideH, istrideW); C10_HIP_KERNEL_LAUNCH_CHECK(); } ); break; } case at::MemoryFormat::Contiguous: { TORCH_CHECK((input.ndimension() == 3 || input.ndimension() == 4), "adaptive_avg_pool2d(): Expected 3D or 4D tensor, but got ", input.sizes()); int64_t grid_x = input.size(-3); if (input.ndimension() == 4) { input_ = input.contiguous(); grid_x *= input_.size(-4); } int64_t sizeD = input_.size(-3); int64_t isizeH = input_.size(-2); int64_t isizeW = input_.size(-1); int64_t istrideD = input_.stride(-3); int64_t istrideH = input_.stride(-2); int64_t istrideW = input_.stride(-1); int64_t osizeH = output_size[0]; int64_t osizeW = output_size[1]; if (input.ndimension() == 4) { output.resize_({input_.size(-4), sizeD, osizeH, osizeW}); } else { output.resize_({sizeD, osizeH, osizeW}); } if (output.numel() == 0) { return; } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input_.scalar_type(), "adaptive_avg_pool2d_cuda", [&] { scalar_t *input_data = input_.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); // cuda blocks & threads: int blocksH = std::max<int64_t>((int)(16L / sizeD), 1); dim3 blocks(grid_x, blocksH); dim3 threads(32, 8); // run averagepool kernel hipLaunchKernelGGL(( adaptive_average_pool) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); C10_HIP_KERNEL_LAUNCH_CHECK(); } ); break; } default: TORCH_CHECK( false, "Unsupported memory format. 
Supports only ChannelsLast, Contiguous"); } } void adaptive_avg_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input) { TensorArg grad_input_arg{ gradInput, "gradInput", 1 }, grad_output_arg{ gradOutput_, "gradOutput_", 2 }, input_arg{ input, "input", 3 }; checkAllSameGPU(__func__, {grad_input_arg, grad_output_arg, input_arg}); switch (input.suggest_memory_format()) { case at::MemoryFormat::ChannelsLast: { // special case for tensor memory format in channels_last TORCH_CHECK(input.ndimension() == 4, "adaptive_avg_pool2d_backward_cuda(): Expected 4D tensor, but got ", input.ndimension()); int sizeB = input.size(0); int sizeC = input.size(1); int isizeH = input.size(2); int isizeW = input.size(3); Tensor gradOutput = gradOutput_; int64_t ostrideB = gradOutput.stride(0); int64_t ostrideC = gradOutput.stride(1); int64_t ostrideH = gradOutput.stride(2); int64_t ostrideW = gradOutput.stride(3); int osizeH = gradOutput.size(-2); int osizeW = gradOutput.size(-1); // preserve channels_last stride on input tensor; if (!gradInput.is_contiguous(at::MemoryFormat::ChannelsLast)) { gradInput.as_strided_( {sizeB, sizeC, isizeH, isizeW}, {sizeC*isizeH*isizeW, 1, isizeW*sizeC, sizeC}); } const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize; size_t sharedMemPerBlock = at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock; // Launch kernel on input tensor elements. Logic behind launch config: // input tensor size NCHW, strides NHWC; // Launch on: // N(C) -> grid.x (striding on C to reduce sh_mem usage) // H -> grid.z * block.z // W -> grid.y * block.y // C -> block.x // encourage larger block_y & block_z for better cache hit while maintain // reasonable block_x for coalesced memory access; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(isizeW), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(isizeH), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = ceil_div(sizeC, block_x * 4); int kernel_size_C = ceil_div(sizeC, block_x * kernel_stride_C); // Do NOT clip grid_x, striding on Batch dimension is not in the kernel, // although it could be easily implemented given current kernel. int grid_x = sizeB*kernel_stride_C; // it's OK to clip grid_y & grid_z, as we block the two dimensions in the kernel; int grid_y = std::min<int>( maxGridSize[1], ceil_div(isizeW, block_y*BLOCK_STRIDE)); int grid_z = std::min<int>( maxGridSize[2], ceil_div(isizeH, block_z*BLOCK_STRIDE)); const dim3 grid(grid_x, grid_y, grid_z); // we are dealing with packed tensor here. max index is the same as numel. 
// TODO: to really support input tensor large enought to go beyond int32, // we will need to restrict out shared memory usage and adjust the launch // config; AT_ASSERT(input.numel() < std::numeric_limits<int32_t>::max()); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool2d_backward_nhwc_cuda", [&] { size_t shmem_size = (kernel_size_C * block_x * block_y * block_z + osizeH + osizeW) * sizeof(scalar_t) + 2 * isizeW * sizeof(int32_t); AT_ASSERT(shmem_size <= sharedMemPerBlock); hipLaunchKernelGGL(( adaptive_average_gradinput_nhwc<int32_t>), dim3(grid), dim3(block), shmem_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput.data_ptr<scalar_t>(), gradOutput.data_ptr<scalar_t>(), sizeB, sizeC, isizeH, isizeW, osizeH, osizeW, kernel_stride_C, kernel_size_C, ostrideB, ostrideC, ostrideH, ostrideW); C10_HIP_KERNEL_LAUNCH_CHECK(); } ); break; } case at::MemoryFormat::Contiguous: { bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests Tensor gradOutput = gradOutput_.contiguous(); int64_t sizeD = input.size(-3); int64_t isizeH = input.size(-2); int64_t isizeW = input.size(-1); int64_t osizeH = gradOutput.size(-2); int64_t osizeW = gradOutput.size(-1); int64_t grid_x = sizeD; if (input.ndimension() == 4) grid_x *= input.size(-4); //bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool2d_backward_cuda", [&] { scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); // cuda blocks & threads: int blocksH = ::max((int)(16L / sizeD), 1); dim3 blocks(grid_x, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( atomic_adaptive_average_gradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { // run updateGradInput kernel hipLaunchKernelGGL(( adaptive_average_gradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); C10_HIP_KERNEL_LAUNCH_CHECK(); } } ); break; } default: TORCH_CHECK( false, "Unsupported memory format. 
Supports only ChannelsLast, Contiguous"); } } } // namespace Tensor& adaptive_avg_pool2d_out_cuda( const Tensor& input, IntArrayRef output_size, Tensor& output) { adaptive_avg_pool2d_out_cuda_template( output, input, output_size); return output; } Tensor adaptive_avg_pool2d_cuda( at::Tensor const& input, IntArrayRef output_size) { auto output = at::empty({0}, input.options()); adaptive_avg_pool2d_out_cuda_template( output, input, output_size); return output; } Tensor& adaptive_avg_pool2d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool2d_backward_out_cuda"); gradInput.resize_as_(input); if (gradInput.numel() != 0) { adaptive_avg_pool2d_backward_out_cuda_template( gradInput, gradOutput, input); } return gradInput; } Tensor adaptive_avg_pool2d_backward_cuda( const Tensor& gradOutput, const Tensor& input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool2d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); if (gradInput.numel() != 0) { adaptive_avg_pool2d_backward_out_cuda_template( gradInput, gradOutput, input); } return gradInput; } } // at::native } // at #undef BLOCK_STRIDE #undef CUDA_MAX_THREADS #undef START_IND #undef END_IND
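The adaptive-pooling kernels above lean entirely on the START_IND_INT / END_IND_INT integer bin arithmetic, so a small host-side check is a convenient way to see which input range each output element averages over. The sketch below is illustrative only: the helper names start_ind/end_ind and the 7-to-3 sizes are invented for the example, but the floor/ceil divisions mirror the macros, and the printed factor corresponds to the per-output scaling used in adaptive_average_pool_nhwc (restricted to one dimension).

#include <cstdio>

// illustrative helpers mirroring START_IND_INT / END_IND_INT above
static int start_ind(int o, int osize, int isize) { return (o * isize) / osize; }
static int end_ind(int o, int osize, int isize)   { return ((o + 1) * isize + osize - 1) / osize; }

int main() {
  const int isizeW = 7, osizeW = 3;   // example sizes, not taken from the file
  for (int ow = 0; ow < osizeW; ++ow) {
    const int istart = start_ind(ow, osizeW, isizeW);
    const int iend   = end_ind(ow, osizeW, isizeW);
    // factor corresponds to `scalar_t(1.0) / ((iendH-istartH) * (iendW-istartW))`
    // in the forward kernel, here for the width dimension only.
    printf("ow=%d averages input [%d, %d), factor = %f\n",
           ow, istart, iend, 1.0 / (iend - istart));
  }
  return 0;
}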
bbd1cf9287ff73b132d91a8bc0dcf000165b8c41.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/ceil_div.h> #include <ATen/Dispatch.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <ATen/native/cuda/LaunchUtils.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/_adaptive_avg_pool2d_backward_native.h> #include <ATen/ops/_adaptive_avg_pool2d_native.h> #include <ATen/ops/empty.h> #include <ATen/ops/zeros_like.h> #endif #include <algorithm> #include <cfloat> #include <cmath> #define START_IND(a,b,c) (int)std::floor((float)(a * c) / b) #define END_IND(a,b,c) (int)std::ceil((float)((a + 1) * c) / b) #define START_IND_INT(a,b,c) ((a * c) / b) #define END_IND_INT(a,b,c) (((a + 1) * c + b - 1) / b) // #define START_IND(a,b,c) a * c / b // #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0 #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit #define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched namespace at { namespace native { namespace { // 4d tensor B x D x H x W // All kernels view batch dim B and feature dim D as collapsed. /* * Description: * this function adaptively average pools an input 4D tensor along dimensions 2 and 3 * 4D input, 4D output */ template <typename T> __global__ void adaptive_average_pool(T *input, T *output, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators on output pixels int oh, ow; // select input/output plane based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; output = output + o_plane*osizeH*osizeW; input = input + i_plane*istrideD; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y*gridDim.y; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; // For all output pixels... 
for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = START_IND(oh, osizeH, isizeH); int iendH = END_IND(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = START_IND(ow, osizeW, isizeW); int iendW = END_IND(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the average pooling over corresponding input pixels T *ptr_input = input + istartH*istrideH + istartW*istrideW; T *ptr_output = output + oh*osizeW + ow; T sum = static_cast<T>(0); int ih, iw; for(ih = 0; ih < kH; ++ih) { for(iw = 0; iw < kW; ++iw) { T val = ptr_input[iw*istrideW]; sum += val; } ptr_input += istrideH; // next input line } // Update output *ptr_output = sum / kH / kW; } } } /* * Description: * this function computes the gradInput from gradOutput */ template <typename T> __global__ void adaptive_average_gradinput( T *gradInput, T *gradOutput, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators on input pixels int ih, iw; // select input/output plane based on thread/block ID int i_plane = blockIdx.x; int o_plane = i_plane; gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; int istartH = blockDim.y*blockIdx.y + threadIdx.y; int iendH = isizeH; int istepH = blockDim.y*gridDim.y; int istartW = threadIdx.x; int iendW = isizeW; int istepW = blockDim.x; // compute gradInput for(ih = istartH; ih < iendH; ih += istepH) { int ostartH = START_IND(ih, isizeH, osizeH); int oendH = END_IND(ih, isizeH, osizeH); for(iw = istartW; iw < iendW; iw += istepW) { int ostartW = START_IND(iw, isizeW, osizeW); int oendW = END_IND(iw, isizeW, osizeW); // Compute the gradients over corresponding output pixels T *ptr_gradInput = gradInput + ih*isizeW + iw; int oh, ow; for(oh = ostartH; oh < oendH; ++oh) { int kH = START_IND(oh, osizeH, isizeH) - END_IND(oh, osizeH, isizeH); for(ow = ostartW; ow < oendW; ++ow) { int kW = START_IND(ow, osizeW, isizeW) - END_IND(ow, osizeW, isizeW); T grad_delta = gradOutput[ow + oh*osizeW] / kH / kW; *ptr_gradInput += grad_delta; } } } } } /* * Description: * this function computes the gradInput from gradOutput * (uses atomic add) */ template <typename T> __global__ void atomic_adaptive_average_gradinput( T *gradInput, T *gradOutput, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators on output indices int oh, ow; // select input/output plane based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; gradOutput = gradOutput + o_plane*osizeW*osizeH; gradInput = gradInput + i_plane*isizeW*isizeH; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // For all output pixels... 
for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = START_IND(oh, osizeH, isizeH); int iendH = END_IND(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = START_IND(ow, osizeW, isizeW); int iendW = END_IND(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the gradients for over corresponding input pixels T *ptr_gradInput = gradInput + istartH*isizeW + istartW; T *ptr_gradOutput = gradOutput + oh*osizeW + ow; T grad_delta = *ptr_gradOutput / kW / kH; int ih, iw; for(ih = 0; ih < kH; ++ih) { for(iw = 0; iw < kW; ++iw) { // atomic add since different threads could update same variable gpuAtomicAddNoReturn(&(ptr_gradInput[iw]), grad_delta); } ptr_gradInput += isizeW; // next input line } } } } /* * Description: * this function adaptively average pools an input 4D tensor along dimensions 2 and 3 * NHWC layout for both input and output tensor * 4D input, 4D output */ template <typename index_t, typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void adaptive_average_pool_nhwc(const scalar_t* __restrict__ input, scalar_t* __restrict__ output, int sizeB, int sizeC, int isizeH, int isizeW, int osizeH, int osizeW, int kernel_stride_C, int kernel_size_C, index_t istrideB, index_t istrideC, index_t istrideH, index_t istrideW) { extern __shared__ int smem[]; scalar_t *out_cached = reinterpret_cast<scalar_t*>(smem); // flattening cta for pre-computation & smem initialization; int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; // use shared memory to store temporary output value. This is simply to // reduce register usage. for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = scalar_t(0.0); } __syncthreads(); // each CTA handles a portion of a single slice on batch dimension; int batch_id = blockIdx.x % sizeB; int channel_id = blockIdx.x / sizeB; int channel_offset = threadIdx.x + channel_id * blockDim.x; // each CTA handles a single slice on batch dimension; // We use gridDim.x to handle striding on C as well. output = output + batch_id * osizeH * osizeW * sizeC; input = input + batch_id * istrideB; // split out_cached and exclusively it assigned to each thread; out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C * blockDim.x]; // iterate on output H & W. // Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on // tile so there's a better chance to hit L1 cache. index_t oH = (osizeH + gridDim.z-1) / gridDim.z; index_t oW = (osizeW + gridDim.y-1) / gridDim.y; index_t ostartH = threadIdx.z + blockIdx.z*oH; index_t oendH = ::min(ostartH+oH, osizeH); index_t ostartW = threadIdx.y + blockIdx.y*oW; index_t oendW = ::min(ostartW+oW, osizeW); // Stride for threads, each warp can reuse L1 as they go. So theoretically // better chance to survive cache eviction. 
for (int oh = ostartH; oh < oendH; oh+=blockDim.z) { int istartH = START_IND_INT(oh, osizeH, isizeH); int iendH = END_IND_INT(oh, osizeH, isizeH); for (int ow = ostartW; ow < oendW; ow+=blockDim.y) { int istartW = START_IND_INT(ow, osizeW, isizeW); int iendW = END_IND_INT(ow, osizeW, isizeW); scalar_t factor = scalar_t(1.0) / ((iendH-istartH) * (iendW-istartW)); // loop on input: hierarchy h->w->c, use shared memory here hopefully // would not stall global memory read; for (index_t ih = istartH; ih < iendH; ih++) { for (index_t iw = istartW; iw < iendW; iw++) { int cached_index = threadIdx.x; const scalar_t *ptr_input = input + ih*istrideH + iw*istrideW; for (index_t c = channel_offset; c < sizeC; c += blockDim.x*kernel_stride_C) { out_cached[cached_index] += ptr_input[c*istrideC]; cached_index += blockDim.x; } } } scalar_t *ptr_output = output + (oh * osizeW + ow) * sizeC; int cached_index = threadIdx.x; // write accumulated output to global memory; for (index_t c = channel_offset; c < sizeC; c += blockDim.x*kernel_stride_C) { // This causes numerical issueptr when unit test with NCHW kernel; // switch to could verify the correctness; // output[c] = out_cached[c] / (iendH-istartH) / (iendW-istartW); ptr_output[c] = out_cached[cached_index] * factor; out_cached[cached_index] = scalar_t(0.0); cached_index += blockDim.x; } // no need to __syncthreads() since out_cached is not shared. } } } /* * Description: * this function computes the gradInput from gradOutput * NHWC layout for both input and output tensor * 4D input, 4D output */ template <typename index_t, typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void adaptive_average_gradinput_nhwc(scalar_t* __restrict__ gradInput, const scalar_t* __restrict__ gradOutput, int sizeB, int sizeC, int isizeH, int isizeW, int osizeH, int osizeW, int kernel_stride_C, int kernel_size_C, index_t ostrideB, index_t ostrideC, index_t ostrideH, index_t ostrideW) { extern __shared__ int smem[]; index_t *ostartW_cached = smem; index_t *oendW_cached = &ostartW_cached[isizeW]; // be careful with alignment, in case scalar_t is fp16, we want to assign // int pointers first. scalar_t *r_kW_cached = reinterpret_cast<scalar_t*>(&oendW_cached[isizeW]); scalar_t *r_kH_cached = &r_kW_cached[osizeW]; scalar_t *out_cached = &r_kH_cached[osizeH]; // flattening cta for pre-computation & smem initialization; int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; // Precompute output start/end index per input index on width dimension; // Not doing this for height dimension, as that's our out-most loop. for (index_t i = thread_id; i < isizeW; i+= block_size) { ostartW_cached[i] = START_IND_INT(i, isizeW, osizeW); oendW_cached[i] = END_IND_INT(i, isizeW, osizeW); } // Precompute pooling height/weight factor for each output element; // This is used to weight output gradient when accumulate them on input // gradient. // Technically we don't have to compute it for the whole `osizeH`, since // each cta only covers a consecutive portion of the entire output. But it's // not going to save us from code divergence, and shared memory save is not // an issue neither, so just leave it as is for now. 
for (index_t i = thread_id; i < osizeH; i+= block_size) { r_kH_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeH, isizeH) - START_IND_INT(i, osizeH, isizeH)); } for (index_t i = thread_id; i < osizeW; i+= block_size) { r_kW_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeW, isizeW) - START_IND_INT(i, osizeW, isizeW)); } // each CTA handles a portion of a single slice on batch dimension; int batch_id = blockIdx.x % sizeB; int channel_id = blockIdx.x / sizeB; int channel_offset = threadIdx.x + channel_id * blockDim.x; // use shared memory to store temporary output value. This is simply to // reduce register usage. for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = scalar_t(0.0); } __syncthreads(); // each CTA handles a portion of a single slice on batch dimension; // We use gridDim.x to handle striding on C as well. gradInput = gradInput + batch_id * isizeH * isizeW * sizeC; gradOutput = gradOutput + batch_id * ostrideB; // split out_cached and exclusively it assigned to each thread; out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x * kernel_size_C]; // iterate on input H & W. // Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on // tile so there's a better chance to hit L1 cache. index_t iH = (isizeH + gridDim.z-1) / gridDim.z; index_t iW = (isizeW + gridDim.y-1) / gridDim.y; index_t istartH = threadIdx.z + blockIdx.z*iH; index_t iendH = ::min(istartH+iH, isizeH); index_t istartW = threadIdx.y + blockIdx.y*iW; index_t iendW = ::min(istartW+iW, isizeW); // Stride for threads, each warp can reuse L1 as they go. So theoretically // better chance to survive cache eviction. for (index_t ih = istartH; ih < iendH; ih+=blockDim.z) { index_t ostartH = START_IND_INT(ih, isizeH, osizeH); index_t oendH = END_IND_INT(ih, isizeH, osizeH); for (index_t iw = istartW; iw < iendW; iw+=blockDim.y) { // loop on output: hierarchy h->w->c, so we could reuse weight factor f // because it remains the same for given oh & ow for(index_t oh = ostartH; oh < oendH; ++oh) { for(index_t ow = ostartW_cached[iw]; ow < oendW_cached[iw]; ++ow) { scalar_t f = r_kW_cached[ow] * r_kH_cached[oh]; const scalar_t* ptr_gradOutput = gradOutput + oh*ostrideH + ow*ostrideW; int cached_index = threadIdx.x; for (index_t c = channel_offset; c < sizeC; c += blockDim.x*kernel_stride_C) { out_cached[cached_index] += ptr_gradOutput[c*ostrideC] * f; cached_index += blockDim.x; } } } scalar_t *ptr_gradInput = gradInput + (ih * isizeW + iw) * sizeC; int cached_index = threadIdx.x; // write accumulated gradIput to global memory; for (index_t c = channel_offset; c < sizeC; c += blockDim.x*kernel_stride_C) { ptr_gradInput[c] = out_cached[cached_index]; out_cached[cached_index] = scalar_t(0.0); cached_index += blockDim.x; } // no need to __syncthreads() since out_cached is not shared. 
} } } // 4d tensor B x D x H x W void adaptive_avg_pool2d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef output_size) { TensorArg input_arg{ input, "input", 1 }, output_arg{ output, "output", 2 }; checkAllSameGPU(__func__, {input_arg, output_arg}); for (int64_t i = 1; i < input.ndimension(); i++) { TORCH_CHECK(input.size(i) > 0, "adaptive_avg_pool2d(): Expected input to have non-zero size for non-batch dimensions, " "but input has sizes ", input.sizes(), " with dimension ", i, " being " "empty"); } Tensor input_ = input; switch (input.suggest_memory_format()) { case at::MemoryFormat::ChannelsLast: { // special case for tensor memory format in channels_last TORCH_CHECK(input.ndimension() == 4, "adaptive_avg_pool2d(): Expected 4D tensor, but got ", input.sizes()); int sizeB = input_.size(0); int sizeC = input_.size(1); int isizeH = input_.size(2); int isizeW = input_.size(3); int64_t istrideB = input_.stride(0); int64_t istrideC = input_.stride(1); int64_t istrideH = input_.stride(2); int64_t istrideW = input_.stride(3); int osizeH = output_size[0]; int osizeW = output_size[1]; // preserve channels_last stride on output tensor; if (!output.is_contiguous(at::MemoryFormat::ChannelsLast)) { // TODO: modify this after resize_ added `memory_format` tag output.resize_({sizeB, sizeC, osizeH, osizeW}).as_strided_({sizeB, sizeC, osizeH, osizeW}, {sizeC*osizeH*osizeW, 1, osizeW*sizeC, sizeC}); } if (output.numel() == 0) { return; } const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize; size_t sharedMemPerBlock = at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock; // Launch kernel on output tensor elements. Logic behind launch config: // output tensor size NCHW, strides NHWC; // Launch on: // N -> grid.x // H -> grid.z * block.z // W -> grid.y * block.y // C -> block.x // encourage larger block_y & block_z for better cache hit while maintain // reasonable block_x for coalesced memory access; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(osizeW), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(osizeH), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = ceil_div(sizeC, block_x * 4); int kernel_size_C = ceil_div(sizeC, block_x * kernel_stride_C); // Do NOT clip grid_x, striding on Batch dimension is not in the kernel, // although it could be easily implemented given current kernel. int grid_x = sizeB*kernel_stride_C; // it's OK to clip grid_y & grid_z, as we block the two dimensions in the kernel; int grid_y = std::min<int>( maxGridSize[1], ceil_div(osizeW, block_y*BLOCK_STRIDE)); int grid_z = std::min<int>( maxGridSize[2], ceil_div(osizeH, block_z*BLOCK_STRIDE)); const dim3 grid(grid_x, grid_y, grid_z); // we are dealing with packed tensor here. max index is the same as numel. 
// TODO: to really support input tensor large enought to go beyond int32, // we will need to restrict out shared memory usage and adjust the launch // config; AT_ASSERT(input_.numel() < std::numeric_limits<int32_t>::max()); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input_.scalar_type(), "adaptive_avg_pool2d_nhwc_cuda", [&] { size_t shmem_size = (kernel_size_C * block_x * block_y * block_z) * sizeof(scalar_t); AT_ASSERT(shmem_size <= sharedMemPerBlock); adaptive_average_pool_nhwc<int32_t><<<grid, block, shmem_size, at::cuda::getCurrentCUDAStream()>>> ( input_.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), sizeB, sizeC, isizeH, isizeW, osizeH, osizeW, kernel_stride_C, kernel_size_C, istrideB, istrideC, istrideH, istrideW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } ); break; } case at::MemoryFormat::Contiguous: { TORCH_CHECK((input.ndimension() == 3 || input.ndimension() == 4), "adaptive_avg_pool2d(): Expected 3D or 4D tensor, but got ", input.sizes()); int64_t grid_x = input.size(-3); if (input.ndimension() == 4) { input_ = input.contiguous(); grid_x *= input_.size(-4); } int64_t sizeD = input_.size(-3); int64_t isizeH = input_.size(-2); int64_t isizeW = input_.size(-1); int64_t istrideD = input_.stride(-3); int64_t istrideH = input_.stride(-2); int64_t istrideW = input_.stride(-1); int64_t osizeH = output_size[0]; int64_t osizeW = output_size[1]; if (input.ndimension() == 4) { output.resize_({input_.size(-4), sizeD, osizeH, osizeW}); } else { output.resize_({sizeD, osizeH, osizeW}); } if (output.numel() == 0) { return; } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input_.scalar_type(), "adaptive_avg_pool2d_cuda", [&] { scalar_t *input_data = input_.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); // cuda blocks & threads: int blocksH = std::max<int64_t>((int)(16L / sizeD), 1); dim3 blocks(grid_x, blocksH); dim3 threads(32, 8); // run averagepool kernel adaptive_average_pool <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( input_data, output_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } ); break; } default: TORCH_CHECK( false, "Unsupported memory format. 
Supports only ChannelsLast, Contiguous"); } } void adaptive_avg_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input) { TensorArg grad_input_arg{ gradInput, "gradInput", 1 }, grad_output_arg{ gradOutput_, "gradOutput_", 2 }, input_arg{ input, "input", 3 }; checkAllSameGPU(__func__, {grad_input_arg, grad_output_arg, input_arg}); switch (input.suggest_memory_format()) { case at::MemoryFormat::ChannelsLast: { // special case for tensor memory format in channels_last TORCH_CHECK(input.ndimension() == 4, "adaptive_avg_pool2d_backward_cuda(): Expected 4D tensor, but got ", input.ndimension()); int sizeB = input.size(0); int sizeC = input.size(1); int isizeH = input.size(2); int isizeW = input.size(3); Tensor gradOutput = gradOutput_; int64_t ostrideB = gradOutput.stride(0); int64_t ostrideC = gradOutput.stride(1); int64_t ostrideH = gradOutput.stride(2); int64_t ostrideW = gradOutput.stride(3); int osizeH = gradOutput.size(-2); int osizeW = gradOutput.size(-1); // preserve channels_last stride on input tensor; if (!gradInput.is_contiguous(at::MemoryFormat::ChannelsLast)) { gradInput.as_strided_( {sizeB, sizeC, isizeH, isizeW}, {sizeC*isizeH*isizeW, 1, isizeW*sizeC, sizeC}); } const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize; size_t sharedMemPerBlock = at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock; // Launch kernel on input tensor elements. Logic behind launch config: // input tensor size NCHW, strides NHWC; // Launch on: // N(C) -> grid.x (striding on C to reduce sh_mem usage) // H -> grid.z * block.z // W -> grid.y * block.y // C -> block.x // encourage larger block_y & block_z for better cache hit while maintain // reasonable block_x for coalesced memory access; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(isizeW), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(isizeH), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = ceil_div(sizeC, block_x * 4); int kernel_size_C = ceil_div(sizeC, block_x * kernel_stride_C); // Do NOT clip grid_x, striding on Batch dimension is not in the kernel, // although it could be easily implemented given current kernel. int grid_x = sizeB*kernel_stride_C; // it's OK to clip grid_y & grid_z, as we block the two dimensions in the kernel; int grid_y = std::min<int>( maxGridSize[1], ceil_div(isizeW, block_y*BLOCK_STRIDE)); int grid_z = std::min<int>( maxGridSize[2], ceil_div(isizeH, block_z*BLOCK_STRIDE)); const dim3 grid(grid_x, grid_y, grid_z); // we are dealing with packed tensor here. max index is the same as numel. 
// TODO: to really support input tensor large enought to go beyond int32, // we will need to restrict out shared memory usage and adjust the launch // config; AT_ASSERT(input.numel() < std::numeric_limits<int32_t>::max()); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool2d_backward_nhwc_cuda", [&] { size_t shmem_size = (kernel_size_C * block_x * block_y * block_z + osizeH + osizeW) * sizeof(scalar_t) + 2 * isizeW * sizeof(int32_t); AT_ASSERT(shmem_size <= sharedMemPerBlock); adaptive_average_gradinput_nhwc<int32_t><<<grid, block, shmem_size, at::cuda::getCurrentCUDAStream()>>> ( gradInput.data_ptr<scalar_t>(), gradOutput.data_ptr<scalar_t>(), sizeB, sizeC, isizeH, isizeW, osizeH, osizeW, kernel_stride_C, kernel_size_C, ostrideB, ostrideC, ostrideH, ostrideW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } ); break; } case at::MemoryFormat::Contiguous: { bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests Tensor gradOutput = gradOutput_.contiguous(); int64_t sizeD = input.size(-3); int64_t isizeH = input.size(-2); int64_t isizeW = input.size(-1); int64_t osizeH = gradOutput.size(-2); int64_t osizeW = gradOutput.size(-1); int64_t grid_x = sizeD; if (input.ndimension() == 4) grid_x *= input.size(-4); //bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool2d_backward_cuda", [&] { scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); // cuda blocks & threads: int blocksH = std::max((int)(16L / sizeD), 1); dim3 blocks(grid_x, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically atomic_adaptive_average_gradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { // run updateGradInput kernel adaptive_average_gradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } ); break; } default: TORCH_CHECK( false, "Unsupported memory format. 
Supports only ChannelsLast, Contiguous"); } } } // namespace Tensor& adaptive_avg_pool2d_out_cuda( const Tensor& input, IntArrayRef output_size, Tensor& output) { adaptive_avg_pool2d_out_cuda_template( output, input, output_size); return output; } Tensor adaptive_avg_pool2d_cuda( at::Tensor const& input, IntArrayRef output_size) { auto output = at::empty({0}, input.options()); adaptive_avg_pool2d_out_cuda_template( output, input, output_size); return output; } Tensor& adaptive_avg_pool2d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool2d_backward_out_cuda"); gradInput.resize_as_(input); if (gradInput.numel() != 0) { adaptive_avg_pool2d_backward_out_cuda_template( gradInput, gradOutput, input); } return gradInput; } Tensor adaptive_avg_pool2d_backward_cuda( const Tensor& gradOutput, const Tensor& input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool2d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); if (gradInput.numel() != 0) { adaptive_avg_pool2d_backward_out_cuda_template( gradInput, gradOutput, input); } return gradInput; } } // at::native } // at #undef BLOCK_STRIDE #undef CUDA_MAX_THREADS #undef START_IND #undef END_IND
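The ChannelsLast branch of adaptive_avg_pool2d_out_cuda_template splits the channel dimension across gridDim.x via kernel_stride_C so that each CTA only keeps kernel_size_C accumulators per thread in shared memory. The standalone sketch below reproduces that arithmetic for assumed example values (a 32x8x4 block and sizeC = 2048 are not taken from the file, and ceil_div_i stands in for at::ceil_div) to make the shared-memory budget concrete.

#include <cstdio>

static int ceil_div_i(int a, int b) { return (a + b - 1) / b; }

int main() {
  const int sizeC = 2048;                     // example channel count
  const int block_x = 32, block_y = 8, block_z = 4;

  // channels are strided across gridDim.x in `kernel_stride_C` slices, so each
  // thread only accumulates `kernel_size_C` channels in shared memory
  int kernel_stride_C = ceil_div_i(sizeC, block_x * 4);               // 16
  int kernel_size_C   = ceil_div_i(sizeC, block_x * kernel_stride_C); // 4

  size_t shmem_size =
      (size_t)kernel_size_C * block_x * block_y * block_z * sizeof(float);
  printf("kernel_stride_C=%d kernel_size_C=%d shmem=%zu bytes\n",
         kernel_stride_C, kernel_size_C, shmem_size);   // 16384 bytes for float
  // grid.x would then be sizeB * kernel_stride_C, as in the forward launch above.
  return 0;
}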
46112c63d4ad298b603945c65055887c1a191e5d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define N 2048 * 2048 // Number of elements in each vector /* * Optimize this already-accelerated codebase. Work iteratively, * and use nsys to support your work. * * Aim to profile `saxpy` (without modifying `N`) running under * 20us. * * Some bugs have been placed in this codebase for your edification. */ __global__ void saxpy(float * a, float * b, float * c) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < N; i += stride) { //result[i] = a[i] + b[i]; c[i] = 2 * a[i] + b[i]; } } int main() { float *a, *b, *c; int size = N * sizeof (int); // The total number of bytes per vector int deviceId; int numberOfSMs; hipGetDevice(&deviceId); hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId); hipMallocManaged(&a, size); hipMallocManaged(&b, size); hipMallocManaged(&c, size); // Initialize memory for( int i = 0; i < N; ++i ) { a[i] = 2; b[i] = 1; c[i] = 0; } hipMemPrefetchAsync(a, size, deviceId); hipMemPrefetchAsync(b, size, deviceId); hipMemPrefetchAsync(c, size, deviceId); int threads_per_block = 256; int number_of_blocks = numberOfSMs * 32; hipLaunchKernelGGL(( saxpy) , dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, b, c ); hipDeviceSynchronize(); // Print out the first and last 5 values of c for a quality check for( int i = 0; i < 5; ++i ) printf("c[%d] = %f, ", i, c[i]); printf ("\n"); for( int i = N-5; i < N; ++i ) printf("c[%d] = %f, ", i, c[i]); printf ("\n"); hipFree( a ); hipFree( b ); hipFree( c ); }
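The main rewrite hipify applied in this file is the kernel-launch syntax, so as a quick reference the sketch below shows the two equivalent spellings side by side. The kernel `scale` and wrapper `launch_scale` are invented for illustration and are not part of the exercise; dynamic shared memory and the stream are left at 0 in both forms.

#include <hip/hip_runtime.h>

__global__ void scale(float* x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= 2.0f;
}

void launch_scale(float* x, int n) {
  dim3 grid((n + 255) / 256), block(256);
  // CUDA form:  scale<<<grid, block, 0, 0>>>(x, n);
  hipLaunchKernelGGL(scale, grid, block, 0, 0, x, n);   // HIP form
}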
46112c63d4ad298b603945c65055887c1a191e5d.cu
#include <stdio.h> #define N 2048 * 2048 // Number of elements in each vector /* * Optimize this already-accelerated codebase. Work iteratively, * and use nsys to support your work. * * Aim to profile `saxpy` (without modifying `N`) running under * 20us. * * Some bugs have been placed in this codebase for your edification. */ __global__ void saxpy(float * a, float * b, float * c) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < N; i += stride) { //result[i] = a[i] + b[i]; c[i] = 2 * a[i] + b[i]; } } int main() { float *a, *b, *c; int size = N * sizeof (int); // The total number of bytes per vector int deviceId; int numberOfSMs; cudaGetDevice(&deviceId); cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId); cudaMallocManaged(&a, size); cudaMallocManaged(&b, size); cudaMallocManaged(&c, size); // Initialize memory for( int i = 0; i < N; ++i ) { a[i] = 2; b[i] = 1; c[i] = 0; } cudaMemPrefetchAsync(a, size, deviceId); cudaMemPrefetchAsync(b, size, deviceId); cudaMemPrefetchAsync(c, size, deviceId); int threads_per_block = 256; int number_of_blocks = numberOfSMs * 32; saxpy <<<number_of_blocks, threads_per_block>>>( a, b, c ); cudaDeviceSynchronize(); // Print out the first and last 5 values of c for a quality check for( int i = 0; i < 5; ++i ) printf("c[%d] = %f, ", i, c[i]); printf ("\n"); for( int i = N-5; i < N; ++i ) printf("c[%d] = %f, ", i, c[i]); printf ("\n"); cudaFree( a ); cudaFree( b ); cudaFree( c ); }
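The exercise above states that its bugs were placed on purpose, so the file is kept verbatim. Purely as a hedged aside, two things a reviewer might look at are the buffer size being computed with sizeof(int) for float vectors (harmless only while the two types happen to share a size) and the verification loops reading managed memory that may still reside on the GPU. The helper below is an illustrative sketch of the second point, not the exercise's official answer; it reuses N from the file.

#include <cuda_runtime.h>
#include <cstdio>

#define N (2048 * 2048)

// Prefetch the result vector back to the CPU before the verification loop so
// the printf reads do not fault managed pages back one element at a time.
void check_first_values(float *c) {
  const size_t size = N * sizeof(float);            // bytes per float vector
  cudaMemPrefetchAsync(c, size, cudaCpuDeviceId);
  cudaDeviceSynchronize();
  for (int i = 0; i < 5; ++i) printf("c[%d] = %f, ", i, c[i]);
  printf("\n");
}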
036ba9525cc0b0e55b15b80c8bc907216cafe672.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/BinaryOps.h> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void lshift_kernel_cuda(TensorIteratorBase& iter) { if (iter.dtype() == ScalarType::Float || iter.dtype() == ScalarType::Double || iter.dtype() == ScalarType::Half || iter.dtype() == ScalarType::BFloat16) { AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "lshift_cuda", [&]() { gpu_kernel_with_scalars( iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a * ::pow(static_cast<scalar_t>(2), b); }); }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "lshift_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return static_cast<std::make_unsigned_t<scalar_t>>(a) << b; }); }); } } void rshift_kernel_cuda(TensorIteratorBase& iter) { if (iter.dtype() == ScalarType::Float || iter.dtype() == ScalarType::Double || iter.dtype() == ScalarType::Half || iter.dtype() == ScalarType::BFloat16) { AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "rshift_cuda", [&]() { gpu_kernel_with_scalars( iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a / ::pow(static_cast<scalar_t>(2), b); }); }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "rshift_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a >> b; }); }); } } REGISTER_DISPATCH(lshift_stub, &lshift_kernel_cuda); REGISTER_DISPATCH(rshift_stub, &rshift_kernel_cuda); }} // namespace at::native
036ba9525cc0b0e55b15b80c8bc907216cafe672.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/BinaryOps.h> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void lshift_kernel_cuda(TensorIteratorBase& iter) { if (iter.dtype() == ScalarType::Float || iter.dtype() == ScalarType::Double || iter.dtype() == ScalarType::Half || iter.dtype() == ScalarType::BFloat16) { AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "lshift_cuda", [&]() { gpu_kernel_with_scalars( iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a * std::pow(static_cast<scalar_t>(2), b); }); }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "lshift_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return static_cast<std::make_unsigned_t<scalar_t>>(a) << b; }); }); } } void rshift_kernel_cuda(TensorIteratorBase& iter) { if (iter.dtype() == ScalarType::Float || iter.dtype() == ScalarType::Double || iter.dtype() == ScalarType::Half || iter.dtype() == ScalarType::BFloat16) { AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "rshift_cuda", [&]() { gpu_kernel_with_scalars( iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a / std::pow(static_cast<scalar_t>(2), b); }); }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "rshift_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a >> b; }); }); } } REGISTER_DISPATCH(lshift_stub, &lshift_kernel_cuda); REGISTER_DISPATCH(rshift_stub, &rshift_kernel_cuda); }} // namespace at::native
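For reference, the element-wise semantics the lshift kernel dispatches to can be written as two plain CUDA kernels outside the ATen iterator machinery. This is a minimal standalone sketch, not the dispatcher above: floating-point "shifts" are emulated by scaling with 2^b, and the integral path casts through unsigned so that left-shifting a negative value stays well defined before the result is converted back.

#include <cstdint>

__global__ void lshift_float(const float* a, const float* b, float* out, int n) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = a[i] * powf(2.0f, b[i]);   // a << b, emulated for floats
}

__global__ void lshift_int32(const int32_t* a, const int32_t* b, int32_t* out, int n) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    // unsigned shift avoids undefined behaviour for negative a[i]; the value
    // is then converted back, matching what the kernel above stores.
    out[i] = static_cast<int32_t>(static_cast<uint32_t>(a[i]) << b[i]);
  }
}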